def setUp(self):
        """Deploys, Verifies and adds resources required for testcases
           in cleanup method
        """
        self.oc_node = self.ocp_master_node[0]
        self.gluster_pod = get_ocp_gluster_pod_names(self.oc_node)[0]
        self.gluster_pod_obj = podcmd.Pod(self.oc_node, self.gluster_pod)

        # prefix used to create resources, generated from glustotest_run_id,
        # which is based on the date and time of the test run
        self.prefix = "autotest-%s" % (self.glustotest_run_id.replace("_", ""))
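        # e.g. a run id like "2016_11_22_10_10_10" (format assumed here)
        # would yield the prefix "autotest-20161122101010"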

        _storage_class = self.storage_classes.get(
            'storage_class2', self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # default PVC size to request
        self.pvcsize = 1

        # default number of PVCs to create
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resources())

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation and its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(self.oc_node,
                                             self.dc_name,
                                             timeout=180,
                                             wait_step=3)
        wait_for_pod_be_ready(self.oc_node,
                              self.pod_name,
                              timeout=180,
                              wait_step=3)
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create PVCs for the test cases
        self.pvc_list = []
        for _ in range(self.pvccount):
            test_pvc_name = oc_create_pvc(self.oc_node,
                                          self.sc_name,
                                          pvc_name_prefix=self.prefix,
                                          pvc_size=self.pvcsize)
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(wait_for_resource_absence,
                            self.oc_node,
                            "pvc",
                            test_pvc_name,
                            timeout=600,
                            interval=10)

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
    def test_usage_of_default_storage_class(self):
        """Validate PVs creation for SC with default custom volname prefix"""

        # Unset 'default' option from all the existing Storage Classes
        unset_sc_annotation_cmd = (
            r"""oc annotate sc %s """
            r""""storageclass%s.kubernetes.io/is-default-class"-""")
        set_sc_annotation_cmd = (
            r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
            r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
        get_sc_cmd = (
            r'oc get sc --no-headers '
            r'-o=custom-columns=:.metadata.name,'
            r':".metadata.annotations.storageclass\.'
            r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
            r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
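        # Each output line is expected to hold the SC name plus the values of
        # the GA and beta 'is-default-class' annotations, e.g. (illustrative):
        #   glusterfs-storage   <none>   true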
        sc_list = self.cmd_run(get_sc_cmd)
        for sc in sc_list.split("\n"):
            sc = sc.split()
            if len(sc) != 3:
                self.skipTest(
                    "Unexpected output for list of storage classes. "
                    "The following is expected to contain 3 fields: %s" % sc)
            for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
                if value == '<none>':
                    continue
                self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
                self.addCleanup(
                    self.cmd_run,
                    set_sc_annotation_cmd % (sc[0], api_type, value))

        # Create new SC
        prefix = "autotests-default-sc"
        self.create_storage_class(sc_name_prefix=prefix)

        # Make the new SC the default one and sleep for 1 sec to avoid races
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
        time.sleep(1)

        # Create PVC without specification of SC
        pvc_name = oc_create_pvc(
            self.node, sc_name=None, pvc_name_prefix=prefix)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)

        # Wait for successful creation of PVC and check its SC
        verify_pvc_status_is_bound(self.node, pvc_name)
        get_sc_of_pvc_cmd = (
            "oc get pvc %s --no-headers "
            "-o=custom-columns=:.spec.storageClassName" % pvc_name)
        out = self.cmd_run(get_sc_of_pvc_cmd)
        self.assertEqual(out, self.sc_name)

    def deploy_resources(self):
        """Deploys the required resources: storage class, PVC and user app
           pod with continuous I/O running

        Returns:
            sc_name (str): deployed storage class name
            pvc_name (str): deployed persistent volume claim name
            dc_name (str): deployed deployment config name
            secretname (str): created secret name
        """
        secretname = oc_create_secret(self.oc_node,
                                      namespace=self.restsecretnamespace,
                                      data_key=self.heketi_cli_key,
                                      secret_type=self.provisioner)
        self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)

        sc_name = oc_create_sc(self.oc_node,
                               sc_name_prefix=self.prefix,
                               provisioner=self.provisioner,
                               resturl=self.resturl,
                               restuser=self.restuser,
                               restsecretnamespace=self.restsecretnamespace,
                               restsecretname=secretname,
                               volumenameprefix=self.prefix)
        self.addCleanup(oc_delete, self.oc_node, "sc", sc_name)

        pvc_name = oc_create_pvc(self.oc_node,
                                 sc_name,
                                 pvc_name_prefix=self.prefix,
                                 pvc_size=self.pvcsize)
        self.addCleanup(wait_for_resource_absence,
                        self.oc_node,
                        "pvc",
                        pvc_name,
                        timeout=120,
                        interval=5)
        self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)

        dc_name = oc_create_app_dc_with_io(self.oc_node,
                                           pvc_name,
                                           dc_name_prefix=self.prefix)
        self.addCleanup(oc_delete, self.oc_node, "dc", dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.oc_node, dc_name, 0)

        return sc_name, pvc_name, dc_name, secretname
    def create_sc_with_parameter(
            self, vol_type, success=False, parameter=None):
        """Creates storage class and PVC, and validates the PVC creation event

        Args:
            vol_type (str): storage type, either gluster file or block
            success (bool): if True, wait for the PVC provisioning event to
                            report success, otherwise wait for failure
            parameter (dict): dictionary with storage class parameters
        """
        if vol_type == "glusterfile":
            sc = self.storage_classes.get(
                'storage_class1',
                self.storage_classes.get('file_storage_class'))

            # Create secret to be used in the storage class
            self.secret_name = oc_create_secret(
                self.ocp_master_node[0],
                namespace=sc.get('secretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
            self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
                            self.secret_name)
            sc_parameter = {
                "secretnamespace": sc['secretnamespace'],
                "secretname": self.secret_name,
                "volumetype": "replicate:3"
            }
        elif vol_type == "glusterblock":
            sc = self.storage_classes.get(
                'storage_class2',
                self.storage_classes.get('block_storage_class'))

            # Create secret to be used in the storage class
            self.secret_name = oc_create_secret(
                self.ocp_master_node[0],
                namespace=sc.get('restsecretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
            self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
                            self.secret_name)
            sc_parameter = {
                "provisioner": "gluster.org/glusterblock",
                "restsecretnamespace": sc['restsecretnamespace'],
                "restsecretname": self.secret_name,
                "hacount": sc['hacount']
            }
        else:
            err_msg = "invalid vol_type %s" % vol_type
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        sc_parameter['resturl'] = sc['resturl']
        sc_parameter['restuser'] = sc['restuser']
        sc_parameter.update(parameter or {})
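        # For illustration, sc_parameter for the "glusterblock" case now
        # looks roughly like (values are placeholders):
        #   {"provisioner": "gluster.org/glusterblock",
        #    "restsecretnamespace": "<ns>", "restsecretname": "<secret>",
        #    "hacount": "<n>", "resturl": "<heketi url>",
        #    "restuser": "<user>"} plus any caller-provided overrides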

        # Create storage class
        self.sc_name = oc_create_sc(self.ocp_master_node[0], **sc_parameter)
        self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)

        # Create PVC
        self.pvc_name = oc_create_pvc(self.ocp_client[0], self.sc_name)
        self.addCleanup(wait_for_resource_absence, self.ocp_master_node[0],
                        'pvc', self.pvc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
                        self.pvc_name)

        # Wait for the PVC provisioning event with the expected result
        event_reason = 'ProvisioningFailed'
        if success:
            event_reason = 'ProvisioningSucceeded'
        wait_for_events(self.ocp_master_node[0],
                        obj_name=self.pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_reason=event_reason)
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
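        # e.g. "/mnt/fake_file_for_<fully-qualified-test-id>"; unittest's
        # self.id() returns the dotted test name, so the path is unique
        # per test case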

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name
        )

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for app POD to be up and running
        wait_for_pod_be_ready(
            self.node, app_1_pod_name, timeout=60, wait_step=2)

        # Write data to the app POD
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
        )

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=60, wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def create_and_wait_for_pvcs(self,
                                 pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1,
                                 sc_name=None,
                                 timeout=120,
                                 wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(node,
                                     sc_name,
                                     pvc_name_prefix=pvc_name_prefix,
                                     pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            reclaim_policy = oc_get_custom_resource(node, 'sc',
                                                    ':.reclaimPolicy',
                                                    sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
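                    # with 'Retain', the PV and the backing heketi volume
                    # survive PVC deletion and must be removed explicitly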
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete,
                                    node,
                                    'pv',
                                    pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(node, 'pv', custom,
                                                    pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete,
                                node,
                                'pvc',
                                pvc_name,
                                raise_on_absence=False)

        return pvc_names

    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Validate reuse of volume space after deletion of PVCs"""
        min_storage_gb = 10

        # Set 'arbiter:disabled' tags on the first 2 nodes and their devices
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
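                # free space is assumed to be reported by heketi in KiB, so
                # min_storage_gb * 1024**2 expresses the GiB threshold in KiB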
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest("Devices are expected to have more than "
                                  "%sGb of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      node_id,
                                                      'disabled',
                                                      revert_to=node_info.get(
                                                          'tags',
                                                          {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set 'arbiter:required' tag on all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      node_id,
                                                      'required',
                                                      revert_to=node_info.get(
                                                          'tags',
                                                          {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1
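        # pvc_size: the smaller of the two per-node maximums, converted from
        # KiB to GiB; pvc_amount: one more than the largest device count, so
        # the create/delete loop below is forced to reuse released space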

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = []
        for i in range(3):
            pvc_name = oc_create_pvc(self.node,
                                     self.sc_name,
                                     pvc_name_prefix='arbiter-pvc',
                                     pvc_size=int(pvc_size / 3))
            pvc_names.append(pvc_name)
        caught_exception = None
        for pvc_name in pvc_names:
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception as err:
                if caught_exception is None:
                    # register cleanup for all the PVCs only once
                    for pvc_to_clean in pvc_names:
                        self.addCleanup(wait_for_resource_absence, self.node,
                                        'pvc', pvc_to_clean)
                    for pvc_to_clean in pvc_names:
                        self.addCleanup(oc_delete, self.node, 'pvc',
                                        pvc_to_clean)
                caught_exception = err
        if caught_exception:
            raise caught_exception
        for pvc_name in pvc_names:
            oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(self.node,
                                     self.sc_name,
                                     pvc_name_prefix='arbiter-pvc',
                                     pvc_size=pvc_size)
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception:
                self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                                pvc_name)
                self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
                raise
            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)