Example #1
    def test_validate_logging_pods_and_pvc(self):
        """Validate logging pods and PVC"""

        # Wait for kibana pod to be ready
        kibana_pod = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_kibana_dc)
        openshift_ops.wait_for_pod_be_ready(self._master, kibana_pod)

        # Wait for fluentd pods to be ready
        fluentd_custom = [
            ":.status.desiredNumberScheduled",
            ":.spec.template.metadata.labels"
        ]
        count_and_selector = openshift_ops.oc_get_custom_resource(
            self._master, "ds", fluentd_custom, self._logging_fluentd_ds)
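        # The labels column is rendered as a Go-style map string (e.g.
        # "map[component:fluentd ...]"); strip the leading "map[" and turn the
        # "key:value" pairs into a "key=value" label selector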
        selector = count_and_selector[1][4:].replace(":", "=")
        openshift_ops.wait_for_pods_be_ready(self._master,
                                             int(count_and_selector[0]),
                                             selector)

        # Wait for PVC to be bound and elasticsearch pod to be ready
        es_pod = openshift_ops.get_pod_name_from_dc(self._master,
                                                    self._logging_es_dc)
        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
        pvc_name = openshift_ops.oc_get_custom_resource(
            self._master, "pod", pvc_custom, es_pod)[0]
        openshift_ops.verify_pvc_status_is_bound(self._master, pvc_name)
        openshift_ops.wait_for_pod_be_ready(self._master, es_pod)

        # Validate iscsi and multipath
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            self._logging_es_dc,
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)
    def validate_volumes_and_blocks(self):
        """Validates PVC and block volumes generated through heketi and OCS
        """

        # verify pvc status is "Bound" for all the pvcs
        for pvc in self.pvc_list:
            verify_pvc_status_is_bound(self.oc_node,
                                       pvc,
                                       timeout=300,
                                       wait_step=10)

        # validate pvcs and pvs created on OCS
        match_pvc_and_pv(self.oc_node, self.prefix)

        # get list of block volumes using heketi
        heketi_block_volume_ids, heketi_block_volume_names = (
            self.get_heketi_block_volumes())

        # validate block volumes listed by heketi and pvs
        match_pv_and_heketi_block_volumes(self.oc_node,
                                          heketi_block_volume_ids, self.prefix)

        # validate block volumes listed by heketi and gluster
        match_heketi_and_gluster_block_volumes_by_prefix(
            heketi_block_volume_names, "%s_" % self.prefix)
Example #3
    def test_validate_metrics_pods_and_pvc(self):
        """Validate metrics pods and PVC"""
        # Get cassandra pod name and PVC name
        hawkular_cassandra = get_pod_name_from_rc(
            self.master, self.metrics_rc_hawkular_cassandra)
        custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
        pvc_name = oc_get_custom_resource(self.master, "pod", custom,
                                          hawkular_cassandra)[0]

        # Wait for pods to get ready and PVC to be bound
        verify_pvc_status_is_bound(self.master, pvc_name)
        wait_for_pod_be_ready(self.master, hawkular_cassandra)
        hawkular_metrics = get_pod_name_from_rc(
            self.master, self.metrics_rc_hawkular_metrics)
        wait_for_pod_be_ready(self.master, hawkular_metrics)
        heapster = get_pod_name_from_rc(self.master, self.metrics_rc_heapster)
        wait_for_pod_be_ready(self.master, heapster)

        # Validate iscsi and multipath
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            self.metrics_rc_hawkular_cassandra,
            rtype='rc',
            heketi_server_url=self.registry_heketi_server_url,
            is_registry_gluster=True)
    def validate_volumes_and_blocks(self):
        """Validates PVC and block volumes generated through heketi and OCS
        """

        # verify pvc status is "Bound" for all the pvcs
        for pvc in self.pvc_list:
            verify_pvc_status_is_bound(
                self.oc_node, pvc, timeout=300, wait_step=10
            )

        # validate pvcs and pvs created on OCS
        match_pvc_and_pv(self.oc_node, self.prefix)

        # get list of block volumes using heketi
        heketi_block_volume_ids, heketi_block_volume_names = (
            self.get_heketi_block_volumes()
        )

        # validate block volumes listed by heketi and pvs
        match_pv_and_heketi_block_volumes(
            self.oc_node, heketi_block_volume_ids, self.prefix
        )

        # validate block volumes listed by heketi and gluster
        match_heketi_and_gluster_block_volumes_by_prefix(
            heketi_block_volume_names, "%s_" % self.prefix)
    def test_sc_glusterfile_missing_parameter(self, parameter):
        """Validate glusterfile storage with missing parameters"""
        node, sc = self.ocp_master_node[0], self.sc
        secret_name = self.create_secret()

        parameters = {'resturl': sc['resturl'], 'restuser': sc['restuser']}
        if parameter == 'secretName':
            parameters['secretName'] = secret_name
        elif parameter == 'secretNamespace':
            parameters['secretNamespace'] = sc['secretnamespace']

        sc_name = oc_create_sc(node, **parameters)
        self.addCleanup(oc_delete, node, 'sc', sc_name)

        # Create PVC
        pvc_name = oc_create_pvc(node, sc_name)
        self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)
        self.addCleanup(oc_delete, node, 'pvc', pvc_name)

        # Wait for event with error
        wait_for_events(node,
                        obj_name=pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_reason='ProvisioningFailed')

        # Verify PVC did not get bound
        with self.assertRaises(ExecutionError):
            verify_pvc_status_is_bound(node, pvc_name, timeout=1)
    def test_usage_of_default_storage_class(self):
        """Validate PVs creation for SC with default custom volname prefix"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        # Unset 'default' option from all the existing Storage Classes
        unset_sc_annotation_cmd = (
            r"""oc annotate sc %s """
            r""""storageclass%s.kubernetes.io/is-default-class"-""")
        set_sc_annotation_cmd = (
            r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
            r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
        get_sc_cmd = (
            r'oc get sc --no-headers '
            r'-o=custom-columns=:.metadata.name,'
            r':".metadata.annotations.storageclass\.'
            r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
            r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
        sc_list = self.cmd_run(get_sc_cmd)
        for sc in sc_list.split("\n"):
            sc = sc.split()
            if len(sc) != 3:
                self.skipTest("Unexpected output for list of storage classes. "
                              "Following is expected to contain 3 fields: %s" %
                              sc)
            for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
                if value == '<none>':
                    continue
                self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
                self.addCleanup(
                    self.cmd_run,
                    set_sc_annotation_cmd % (sc[0], api_type, value))

        # Create new SC
        prefix = "autotests-default-sc"
        self.create_storage_class(sc_name_prefix=prefix)

        # Make the new SC the default one and sleep for 1 sec to avoid races
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
        time.sleep(1)

        # Create PVC without specification of SC
        pvc_name = oc_create_pvc(self.node,
                                 sc_name=None,
                                 pvc_name_prefix=prefix)
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)

        # Wait for successful creation of PVC and check its SC
        verify_pvc_status_is_bound(self.node, pvc_name)
        get_sc_of_pvc_cmd = ("oc get pvc %s --no-headers "
                             "-o=custom-columns=:.spec.storageClassName" %
                             pvc_name)
        out = self.cmd_run(get_sc_of_pvc_cmd)
        self.assertEqual(out, self.sc_name)
    def test_usage_of_default_storage_class(self):
        """Validate PVs creation for SC with default custom volname prefix"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        # Unset 'default' option from all the existing Storage Classes
        unset_sc_annotation_cmd = (
            r"""oc annotate sc %s """
            r""""storageclass%s.kubernetes.io/is-default-class"-""")
        set_sc_annotation_cmd = (
            r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
            r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
        get_sc_cmd = (
            r'oc get sc --no-headers '
            r'-o=custom-columns=:.metadata.name,'
            r':".metadata.annotations.storageclass\.'
            r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
            r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
        sc_list = self.cmd_run(get_sc_cmd)
        for sc in sc_list.split("\n"):
            sc = sc.split()
            if len(sc) != 3:
                self.skipTest(
                    "Unexpected output for list of storage classes. "
                    "Following is expected to contain 3 fields: %s" % sc)
            for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
                if value == '<none>':
                    continue
                self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
                self.addCleanup(
                    self.cmd_run,
                    set_sc_annotation_cmd % (sc[0], api_type, value))

        # Create new SC
        prefix = "autotests-default-sc"
        self.create_storage_class(sc_name_prefix=prefix)

        # Make the new SC the default one and sleep for 1 sec to avoid races
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
        time.sleep(1)

        # Create PVC without specification of SC
        pvc_name = oc_create_pvc(
            self.node, sc_name=None, pvc_name_prefix=prefix)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)

        # Wait for successful creation of PVC and check its SC
        verify_pvc_status_is_bound(self.node, pvc_name)
        get_sc_of_pvc_cmd = (
            "oc get pvc %s --no-headers "
            "-o=custom-columns=:.spec.storageClassName" % pvc_name)
        out = self.cmd_run(get_sc_of_pvc_cmd)
        self.assertEqual(out, self.sc_name)
    def create_and_wait_for_pvcs(self, pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1, sc_name=None,
                                 timeout=120, wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(
                wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

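            # With the 'Retain' reclaim policy the PV and its backing heketi
            # volume are not removed automatically when the PVC is deleted,
            # so register their deletion explicitly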
            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)

        return pvc_names
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create a block
        volume of size greater than the default size of the BHV.
        Verify that block volume creation succeeds when we create a BHV
        of size greater than the default size of the BHV.
        """

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
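        # Keep about 2% of the default BHV size, rounded up, as extra
        # reserve space for the volumes created below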
        reserve_size = default_bhv_size * 0.02
        reserve_size = int(math.ceil(reserve_size))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        with self.assertRaises(ExecutionError):
            # create a block vol greater than default BHV size
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # create a block pvc greater than default BHV size
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # create block hosting volume greater than default BHV size
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Cleanup PVC before block hosting volume to avoid failures
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create a block
        volume of size greater than the default size of the BHV.
        Verify that block volume creation succeeds when we create a BHV
        of size greater than the default size of the BHV.
        """

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
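        # Keep about 2% of the default BHV size, rounded up, as extra
        # reserve space for the volumes created below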
        reserve_size = default_bhv_size * 0.02
        reserve_size = int(math.ceil(reserve_size))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        with self.assertRaises(AssertionError):
            # create a block vol greater than default BHV size
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # create a block pvc greater than default BHV size
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # create block hosting volume greater than default BHV size
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Cleanup PVC before block hosting volume to avoid failures
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
    def setUp(self):
        """Deploys and verifies the resources required by the test cases
           and registers them for cleanup
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2', self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # use a pvc size of 1 by default
        self.pvcsize = 1

        # use a pvc count of 10 by default
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces())

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation and its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(self.oc_node,
                                             self.dc_name,
                                             timeout=180,
                                             wait_step=3)
        wait_for_pod_be_ready(self.oc_node,
                              self.pod_name,
                              timeout=180,
                              wait_step=3)
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create pvcs to test
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(self.oc_node,
                                          self.sc_name,
                                          pvc_name_prefix=self.prefix,
                                          pvc_size=self.pvcsize)
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(wait_for_resource_absence,
                            self.oc_node,
                            "pvc",
                            test_pvc_name,
                            timeout=600,
                            interval=10)

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
    def setUp(self):
        """Deploys and verifies the resources required by the test cases
           and registers them for cleanup
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2',
            self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # use a pvc size of 1 by default
        self.pvcsize = 1

        # use a pvc count of 10 by default
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces()
        )

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation and its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(
            self.oc_node, self.dc_name, timeout=180, wait_step=3
        )
        wait_for_pod_be_ready(
            self.oc_node, self.pod_name, timeout=180, wait_step=3
        )
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create pvcs to test
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(
                self.oc_node, self.sc_name,
                pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
            )
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(
                wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
                timeout=600, interval=10
            )

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
Example #13
    def validate_volumes_and_blocks(self):
        """Validates PVC and block volumes generated through heketi and OCS
        """
        heketi_operations = heketi_server_operations_list(
            self.heketi_client_node,
            self.heketi_server_url,
            secret=self.heketi_cli_key,
            user=self.heketi_cli_user)

        for heketi_operation in heketi_operations:
            if heketi_operation["status"] == "failed":
                heketi_server_operation_cleanup(self.heketi_client_node,
                                                self.heketi_server_url,
                                                heketi_operation["id"],
                                                secret=self.heketi_cli_key,
                                                user=self.heketi_cli_user)

        # verify pvc status is "Bound" for all the pvcs
        for pvc in self.pvc_list:
            verify_pvc_status_is_bound(self.oc_node,
                                       pvc,
                                       timeout=300,
                                       wait_step=10)

        # validate pvcs and pvs created on OCS
        match_pvc_and_pv(self.oc_node, self.prefix)

        # get list of block volumes using heketi
        h_blockvol_list = heketi_blockvolume_list_by_name_prefix(
            self.heketi_client_node, self.heketi_server_url, self.prefix)

        # validate block volumes listed by heketi and pvs
        heketi_blockvolume_ids = sorted([bv[0] for bv in h_blockvol_list])
        match_pv_and_heketi_block_volumes(self.oc_node, heketi_blockvolume_ids,
                                          self.prefix)

        # validate block volumes listed by heketi and gluster
        heketi_blockvolume_names = sorted(
            [bv[1].replace("%s_" % self.prefix, "") for bv in h_blockvol_list])
        match_heketi_and_gluster_block_volumes_by_prefix(
            heketi_blockvolume_names, "%s_" % self.prefix)
    def _dynamic_provisioning_block_with_bhv_cleanup(
            self, sc_name, pvc_size, bhv_list):
        """Dynamic provisioning for glusterblock with BHV Cleanup"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        pvc_name = oc_create_pvc(self.node, sc_name, pvc_size=pvc_size)
        try:
            verify_pvc_status_is_bound(self.node, pvc_name)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
            bvol_id = oc_get_custom_resource(
                self.node, 'pv', custom, pv_name)[0]
            bhv_id = heketi_blockvolume_info(
                h_node, h_url, bvol_id, json=True)['blockhostingvolume']
            if bhv_id not in bhv_list:
                self.addCleanup(
                    heketi_volume_delete, h_node, h_url, bhv_id)
        finally:
            self.addCleanup(
                wait_for_resource_absence, self.node, 'pvc', pvc_name)
            self.addCleanup(
                oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=True)
Example #15
    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Validate reuse of volume space after deletion of PVCs"""
        min_storage_gb = 10

        # Set arbiter:disabled tags to the first 2 nodes
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest("Devices are expected to have more than "
                                  "%sGb of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      node_id,
                                                      'disabled',
                                                      revert_to=node_info.get(
                                                          'tags',
                                                          {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      node_id,
                                                      'required',
                                                      revert_to=node_info.get(
                                                          'tags',
                                                          {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = self.create_and_wait_for_pvcs(
            pvc_size=int(pvc_size / 3),
            pvc_name_prefix='arbiter-pvc',
            pvc_amount=3,
            sc_name=self.sc_name)

        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(
                self.node,
                self.sc_name,
                pvc_name_prefix='arbiter-pvc',
                pvc_size=pvc_size)
            try:
                openshift_ops.verify_pvc_status_is_bound(
                    self.node, pvc_name, 300, 10)
            except Exception:
                self.addCleanup(openshift_ops.wait_for_resource_absence,
                                self.node, 'pvc', pvc_name)
                self.addCleanup(openshift_ops.oc_delete, self.node, 'pvc',
                                pvc_name)
                raise
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
            openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)
    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
        """Validate PVC with glusterblock creation when heketi pod is down"""
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class()
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)

        # Write test data
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        # Create second PVC
        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix='autotest-block2', sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name
        )

        # Create second app POD
        app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.node, app_2_dc_name, 0)
        app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)

        # Bring Heketi pod back
        self.cmd_run(heketi_up_cmd)

        # Wait for Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=150, wait_step=3)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name)

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            app_1_pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod',
                        app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for app POD to be up and running
        wait_for_pod_be_ready(self.node,
                              app_1_pod_name,
                              timeout=60,
                              wait_step=2)

        # Write data to the app POD
        write_data_cmd = ("dd if=/dev/urandom of=%s bs=1K count=100" %
                          datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(self.node,
                                               self.heketi_dc_name,
                                               timeout=10,
                                               wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(self.node,
                                       pvc_name_prefix="autotest-file2",
                                       sc_name=sc_name)
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                        app_2_pvc_name)
        self.addCleanup(oc_delete,
                        self.node,
                        'pvc',
                        app_2_pvc_name,
                        raise_on_absence=False)

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            app_2_pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod',
                        app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(self.node,
                                                   self.heketi_dc_name,
                                                   timeout=10,
                                                   wait_step=2)
        wait_for_pod_be_ready(self.node,
                              new_heketi_pod_name,
                              wait_step=5,
                              timeout=120)

        # Wait for second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(self.node,
                              app_2_pod_name,
                              timeout=60,
                              wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
        """Validate PVC with glusterblock creation when heketi pod is down"""
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class()
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)

        # Write test data
        write_data_cmd = ("dd if=/dev/urandom of=%s bs=1K count=100" %
                          datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(self.node,
                                               self.heketi_dc_name,
                                               timeout=10,
                                               wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        # Create second PVC
        app_2_pvc_name = oc_create_pvc(self.node,
                                       pvc_name_prefix='autotest-block2',
                                       sc_name=sc_name)
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                        app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', app_2_pvc_name)

        # Create second app POD
        app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, app_2_dc_name,
                        0)
        app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)

        # Bring Heketi pod back
        self.cmd_run(heketi_up_cmd)

        # Wait for Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(self.node,
                                                   self.heketi_dc_name,
                                                   timeout=10,
                                                   wait_step=2)
        wait_for_pod_be_ready(self.node,
                              new_heketi_pod_name,
                              wait_step=5,
                              timeout=120)

        # Wait for second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(self.node,
                              app_2_pod_name,
                              timeout=150,
                              wait_step=3)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Validate reuse of volume space after deletion of PVCs"""
        min_storage_gb = 10

        # Set arbiter:disabled tags to the first 2 nodes
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest(
                        "Devices are expected to have more than "
                        "%sGb of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'required',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = []
        for i in range(3):
            pvc_name = oc_create_pvc(
                self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
                pvc_size=int(pvc_size / 3))
            pvc_names.append(pvc_name)
        bind_failure = None
        for pvc_name in pvc_names:
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception as err:
                # Register cleanup for all the PVCs once any of them
                # fails to get bound
                for pvc_to_clean in pvc_names:
                    self.addCleanup(
                        wait_for_resource_absence,
                        self.node, 'pvc', pvc_to_clean)
                for pvc_to_clean in pvc_names:
                    self.addCleanup(
                        oc_delete, self.node, 'pvc', pvc_to_clean)
                bind_failure = err
        if bind_failure:
            raise bind_failure
        for pvc_name in pvc_names:
            oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
                pvc_size=pvc_size)
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception:
                self.addCleanup(
                    wait_for_resource_absence, self.node, 'pvc', pvc_name)
                self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
                raise
            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name
        )

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for app POD to be up and running
        wait_for_pod_be_ready(
            self.node, app_1_pod_name, timeout=60, wait_step=2)

        # Write data to the app POD
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
        )

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=60, wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
Example #21
    def create_and_wait_for_pvcs(self,
                                 pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1,
                                 sc_name=None,
                                 timeout=120,
                                 wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(node,
                                     sc_name,
                                     pvc_name_prefix=pvc_name_prefix,
                                     pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

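            # With the 'Retain' reclaim policy the PV and its backing heketi
            # volume are not removed automatically when the PVC is deleted,
            # so register their deletion explicitly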
            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete,
                                    node,
                                    'pv',
                                    pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(node, 'pv', custom,
                                                    pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete,
                                node,
                                'pvc',
                                pvc_name,
                                raise_on_absence=False)

        return pvc_names