def test_usage_of_default_storage_class(self):
        """Validate PVs creation for SC with default custom volname prefix"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        # Unset 'default' option from all the existing Storage Classes
        unset_sc_annotation_cmd = (
            r"""oc annotate sc %s """
            r""""storageclass%s.kubernetes.io/is-default-class"-""")
        set_sc_annotation_cmd = (
            r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
            r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
        get_sc_cmd = (
            r'oc get sc --no-headers '
            r'-o=custom-columns=:.metadata.name,'
            r':".metadata.annotations.storageclass\.'
            r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
            r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
        sc_list = self.cmd_run(get_sc_cmd)
        for sc in sc_list.split("\n"):
            sc = sc.split()
            if len(sc) != 3:
                self.skipTest(
                    "Unexpected output for list of storage classes. "
                    "Following is expected to contain 3 keys:: %s" % sc)
            for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
                if value == '<none>':
                    continue
                self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
                self.addCleanup(
                    self.cmd_run,
                    set_sc_annotation_cmd % (sc[0], api_type, value))

        # Create new SC
        prefix = "autotests-default-sc"
        self.create_storage_class(sc_name_prefix=prefix)

        # Make the new SC the default one and sleep for 1 sec to avoid races
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
        time.sleep(1)

        # Create PVC without specification of SC
        pvc_name = oc_create_pvc(
            self.node, sc_name=None, pvc_name_prefix=prefix)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)

        # Wait for successful creation of PVC and check its SC
        verify_pvc_status_is_bound(self.node, pvc_name)
        get_sc_of_pvc_cmd = (
            "oc get pvc %s --no-headers "
            "-o=custom-columns=:.spec.storageClassName" % pvc_name)
        out = self.cmd_run(get_sc_of_pvc_cmd)
        self.assertEqual(out, self.sc_name)
    def create_and_wait_for_pvcs(self, pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1, sc_name=None,
                                 timeout=120, wait_step=3):
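        """Create multiple PVCs and wait for them to get bound.

        Args:
            pvc_size (int): size of each PVC, default value is 1
            pvc_name_prefix (str): name prefix for each PVC, default value is
                                   'autotests-pvc'
            pvc_amount (int): number of PVCs, default value is 1
            sc_name (str): storage class to create PVCs with, default value is
                           None, which causes reuse of 'self.sc_name' or
                           automatic creation of a new storage class
            timeout (int): timeout for waiting for PVCs to get bound
            wait_step (int): interval between PVC status checks
        Returns:
            list: list of created PVC names
        """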
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(
                wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)

        return pvc_names
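    # A hypothetical usage sketch for the helper above; the size, prefix and
    # amount values are illustrative only, not taken from the original suite:
    #
    #   pvc_names = self.create_and_wait_for_pvcs(
    #       pvc_size=2, pvc_name_prefix="autotests-pvc", pvc_amount=5)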
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create block
        volume of size greater than the default size of BHV.
        Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        reserve_size = default_bhv_size * 0.02
        reserve_size = int(math.ceil(reserve_size))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        with self.assertRaises(ExecutionError):
            # create a block vol greater than default BHV size
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # create a block pvc greater than default BHV size
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # create block hosting volume greater than default BHV size
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Cleanup PVC before block hosting volume to avoid failures
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
    def setUp(self):
        """Deploys, Verifies and adds resources required for testcases
           in cleanup method
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2',
            self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # use PVC size of 1 by default
        self.pvcsize = 1

        # use PVC count of 10 by default
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces()
        )

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation, its state and get the pod name
        self.pod_name = get_pod_name_from_dc(
            self.oc_node, self.dc_name, timeout=180, wait_step=3
        )
        wait_for_pod_be_ready(
            self.oc_node, self.pod_name, timeout=180, wait_step=3
        )
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create PVCs to test
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(
                self.oc_node, self.sc_name,
                pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
            )
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(
                wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
                timeout=600, interval=10
            )

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
    def deploy_resouces(self):
        """Deploys required resources storage class, pvc and user app
           with continous I/O runnig

        Returns:
            sc_name (str): deployed storage class name
            pvc_name (str): deployed persistent volume claim name
            dc_name (str): deployed deployment config name
            secretname (str): created secret file name
        """
        secretname = oc_create_secret(
            self.oc_node, namespace=self.restsecretnamespace,
            data_key=self.heketi_cli_key, secret_type=self.provisioner)
        self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)

        sc_name = oc_create_sc(
            self.oc_node,
            sc_name_prefix=self.prefix, provisioner=self.provisioner,
            resturl=self.resturl, restuser=self.restuser,
            restsecretnamespace=self.restsecretnamespace,
            restsecretname=secretname, volumenameprefix=self.prefix
        )
        self.addCleanup(oc_delete, self.oc_node, "sc", sc_name)

        pvc_name = oc_create_pvc(
            self.oc_node, sc_name,
            pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
        )
        self.addCleanup(
            wait_for_resource_absence, self.oc_node, "pvc", pvc_name,
            timeout=120, interval=5
        )
        self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)

        dc_name = oc_create_app_dc_with_io(
            self.oc_node, pvc_name, dc_name_prefix=self.prefix
        )
        self.addCleanup(oc_delete, self.oc_node, "dc", dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.oc_node, dc_name, 0)

        return sc_name, pvc_name, dc_name, secretname
    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
        """Validate PVC with glusterblock creation when heketi pod is down"""
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class()
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)

        # Write test data
        write_data_cmd = ("dd if=/dev/urandom of=%s bs=1K count=100" %
                          datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(self.node,
                                               self.heketi_dc_name,
                                               timeout=10,
                                               wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        # Create second PVC
        app_2_pvc_name = oc_create_pvc(self.node,
                                       pvc_name_prefix='autotest-block2',
                                       sc_name=sc_name)
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                        app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', app_2_pvc_name)

        # Create second app POD
        app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, app_2_dc_name,
                        0)
        app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)

        # Bring Heketi pod back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(self.node,
                                                   self.heketi_dc_name,
                                                   timeout=10,
                                                   wait_step=2)
        wait_for_pod_be_ready(self.node,
                              new_heketi_pod_name,
                              wait_step=5,
                              timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(self.node,
                              app_2_pod_name,
                              timeout=150,
                              wait_step=3)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create block
        volume of size greater than the default size of BHV.
        Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        reserve_size = default_bhv_size * 0.02
        reserve_size = int(math.ceil(reserve_size))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        with self.assertRaises(ExecutionError):
            # create a block vol greater than default BHV size
            bvol_info = heketi_blockvolume_create(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  (default_bhv_size + 1),
                                                  json=True)
            self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                            self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # create a block pvc greater than default BHV size
        pvc_name = oc_create_pvc(self.node,
                                 sc_name,
                                 pvc_size=(default_bhv_size + 1))
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete,
                        self.node,
                        'pvc',
                        pvc_name,
                        raise_on_absence=False)

        wait_for_events(self.node,
                        pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_type='Warning',
                        event_reason='ProvisioningFailed')

        # create block hosting volume greater than default BHV size
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        (default_bhv_size + reserve_size + 2),
                                        block=True,
                                        json=True)
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # Cleanup PVC before block hosting volume to avoid failures
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete,
                        self.node,
                        'pvc',
                        pvc_name,
                        raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
    def test_targetcli_failure_during_block_pvc_creation(self):
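        """Validate block PVC provisioning while the targetcli process is
        being killed in a loop on one of the Gluster nodes, and verify that
        the PVCs get bound once targetcli is restored.
        """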
        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Disable redundant nodes and leave just 3 nodes online
        h_node_id_list = heketi_node_list(h_node, h_server)
        self.assertGreater(len(h_node_id_list), 2)
        for node_id in h_node_id_list[3:]:
            heketi_node_disable(h_node, h_server, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_server, node_id)

        # Gather info about the Gluster node we are going to use for killing
        # targetcli processes.
        chosen_g_node_id = h_node_id_list[0]
        chosen_g_node_info = heketi_node_info(h_node,
                                              h_server,
                                              chosen_g_node_id,
                                              json=True)
        chosen_g_node_ip = chosen_g_node_info['hostnames']['storage'][0]
        chosen_g_node_hostname = chosen_g_node_info['hostnames']['manage'][0]
        chosen_g_node_ip_and_hostname = set(
            (chosen_g_node_ip, chosen_g_node_hostname))

        g_pods = oc_get_custom_resource(
            self.node,
            'pod', [
                ':.metadata.name', ':.status.hostIP', ':.status.podIP',
                ':.spec.nodeName'
            ],
            selector='glusterfs-node=pod')
        if g_pods and g_pods[0]:
            for g_pod in g_pods:
                if chosen_g_node_ip_and_hostname.intersection(set(g_pod[1:])):
                    host_to_run_cmds = self.node
                    g_pod_prefix, g_pod = 'oc exec %s -- ' % g_pod[0], g_pod[0]
                    break
            else:
                err_msg = (
                    'Failed to find Gluster pod filtering it by following IPs '
                    'and hostnames: %s\nFound following Gluster pods: %s') % (
                        chosen_g_node_ip_and_hostname, g_pods)
                g.log.error(err_msg)
                raise AssertionError(err_msg)
        else:
            host_to_run_cmds, g_pod_prefix, g_pod = chosen_g_node_ip, '', ''

        # Schedule deletion of targetcli process
        file_for_bkp, pvc_number = "~/.targetcli/prefs.bin", 10
        self.cmd_run("%scp %s %s_backup" %
                     (g_pod_prefix, file_for_bkp, file_for_bkp),
                     hostname=host_to_run_cmds)
        self.addCleanup(self.cmd_run,
                        "%srm -f %s_backup" % (g_pod_prefix, file_for_bkp),
                        hostname=host_to_run_cmds)
        kill_targetcli_services_cmd = (
            "while true; do "
            "  %spkill targetcli || echo 'failed to kill targetcli process'; "
            "done" % g_pod_prefix)
        loop_for_killing_targetcli_process = g.run_async(
            host_to_run_cmds, kill_targetcli_services_cmd, "root")
        try:
            # Create bunch of PVCs
            sc_name, pvc_names = self.create_storage_class(), []
            for i in range(pvc_number):
                pvc_names.append(oc_create_pvc(self.node, sc_name, pvc_size=1))
            self.addCleanup(wait_for_resources_absence, self.node, 'pvc',
                            pvc_names)
            self.addCleanup(oc_delete, self.node, 'pvc', ' '.join(pvc_names))

            # Check that we get expected number of provisioning errors
            timeout, wait_step, succeeded_pvcs, failed_pvcs = 120, 1, [], []
            _waiter, err_msg = Waiter(timeout=timeout, interval=wait_step), ""
            for pvc_name in pvc_names:
                _waiter._attempt = 0
                for w in _waiter:
                    events = get_events(self.node,
                                        pvc_name,
                                        obj_type="PersistentVolumeClaim")
                    for event in events:
                        if event['reason'] == 'ProvisioningSucceeded':
                            succeeded_pvcs.append(pvc_name)
                            break
                        elif event['reason'] == 'ProvisioningFailed':
                            failed_pvcs.append(pvc_name)
                            break
                    else:
                        continue
                    break
                if w.expired:
                    err_msg = (
                        "Failed to get neither 'ProvisioningSucceeded' nor "
                        "'ProvisioningFailed' statuses for all the PVCs in "
                        "time. Timeout was %ss, interval was %ss." %
                        (timeout, wait_step))
                    g.log.error(err_msg)
                    raise AssertionError(err_msg)
            self.assertGreater(len(failed_pvcs), len(succeeded_pvcs))
        finally:
            # Stop the killing loop to restore targetcli workability
            loop_for_killing_targetcli_process._proc.terminate()

            # Revert the breakage which can be caused by BZ-1769426
            check_bkp_file_size_cmd = ("%sls -lah %s | awk '{print $5}'" %
                                       (g_pod_prefix, file_for_bkp))
            bkp_file_size = self.cmd_run(check_bkp_file_size_cmd,
                                         hostname=host_to_run_cmds).strip()
            if bkp_file_size == "0":
                self.cmd_run("%smv %s_backup %s" %
                             (g_pod_prefix, file_for_bkp, file_for_bkp),
                             hostname=host_to_run_cmds)
                breakage_err_msg = (
                    "File located at '%s' was corrupted (zero size) on the "
                    "%s. Looks like BZ-1769426 took effect. \n"
                    "Don't worry, it has been restored after test failure." %
                    (file_for_bkp, "'%s' Gluster pod" % g_pod
                     if g_pod else "'%s' Gluster node" % chosen_g_node_ip))
                g.log.error(breakage_err_msg)
                if err_msg:
                    breakage_err_msg = "%s\n%s" % (err_msg, breakage_err_msg)
                raise AssertionError(breakage_err_msg)

        # Wait for all the PVCs to be in bound state
        wait_for_pvcs_be_bound(self.node, pvc_names, timeout=300, wait_step=5)
    def create_sc_with_parameter(self, vol_type, success=False, parameter={}):
        """creates storage class, pvc and validates event

        Args:
            vol_type (str): storage type either gluster file or block
            success (bool): if True check for successfull else failure
                            for pvc creation event
            parameter (dict): dictionary with storage class parameters
        """
        if vol_type == "glusterfile":
            sc = self.storage_classes.get(
                'storage_class1',
                self.storage_classes.get('file_storage_class'))

            # Create secret file for usage in storage class
            self.secret_name = oc_create_secret(
                self.ocp_master_node[0],
                namespace=sc.get('secretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
            self.addCleanup(
                oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
            sc_parameter = {
                "secretnamespace": sc['secretnamespace'],
                "secretname": self.secret_name,
                "volumetype": "replicate:3"
            }
        elif vol_type == "glusterblock":
            sc = self.storage_classes.get(
                'storage_class2',
                self.storage_classes.get('block_storage_class'))

            # Create secret file for usage in storage class
            self.secret_name = oc_create_secret(
                self.ocp_master_node[0],
                namespace=sc.get('restsecretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
            self.addCleanup(
                oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
            sc_parameter = {
                "provisioner": "gluster.org/glusterblock",
                "restsecretnamespace": sc['restsecretnamespace'],
                "restsecretname": self.secret_name,
                "hacount": sc['hacount']
            }
        else:
            err_msg = "invalid vol_type %s" % vol_type
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        sc_parameter['resturl'] = sc['resturl']
        sc_parameter['restuser'] = sc['restuser']
        sc_parameter.update(parameter)

        # Create storage class
        self.sc_name = oc_create_sc(
            self.ocp_master_node[0], **sc_parameter)
        self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)

        # Create PVC
        self.pvc_name = oc_create_pvc(self.ocp_client[0], self.sc_name)
        self.addCleanup(
            wait_for_resource_absence, self.ocp_master_node[0],
            'pvc', self.pvc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0],
                        'pvc', self.pvc_name)

        # Wait for event with error
        event_reason = 'ProvisioningFailed'
        if success:
            event_reason = 'ProvisioningSucceeded'
        wait_for_events(self.ocp_master_node[0],
                        obj_name=self.pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_reason=event_reason)
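    # A hypothetical usage sketch for the helper above; the test name and the
    # 'volumetype' value are illustrative only, not taken from the original
    # suite:
    #
    #   def test_sc_glusterfile_invalid_volumetype_neg(self):
    #       self.create_sc_with_parameter(
    #           "glusterfile", success=False,
    #           parameter={"volumetype": "replicate:4"})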
    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Validate reuse of volume space after deletion of PVCs"""
        min_storage_gb = 10

        # Set arbiter:disabled tags to the first 2 nodes
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest(
                        "Devices are expected to have more than "
                        "%sGb of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'required',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = []
        for i in range(3):
            pvc_name = oc_create_pvc(
                self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
                pvc_size=int(pvc_size / 3))
            pvc_names.append(pvc_name)
        caught_exception = None
        for pvc_name in pvc_names:
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception as err:
                # Register cleanup for all the PVCs and re-raise the caught
                # exception after the loop completes
                for pvc_to_clean in pvc_names:
                    self.addCleanup(
                        wait_for_resource_absence, self.node, 'pvc',
                        pvc_to_clean)
                for pvc_to_clean in pvc_names:
                    self.addCleanup(oc_delete, self.node, 'pvc', pvc_to_clean)
                caught_exception = err
        if caught_exception:
            raise caught_exception
        for pvc_name in pvc_names:
            oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
                pvc_size=pvc_size)
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception:
                self.addCleanup(
                    wait_for_resource_absence, self.node, 'pvc', pvc_name)
                self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
                raise
            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)
    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
        """Validate PVC with glusterblock creation when heketi pod is down"""
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class()
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)

        # Write test data
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        # Create second PVC
        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix='autotest-block2', sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name
        )

        # Create second app POD
        app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.node, app_2_dc_name, 0)
        app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)

        # Bring Heketi pod back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=150, wait_step=3)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_dev_path_block_volume_delete(self):
        """Validate device path name changes the deletion of
           already existing file volumes
        """

        pvc_size, pvc_amount = 2, 5
        pvc_names, gluster_block_list, vol_details = [], [], []

        # Fetch BHV list
        h_bhv_list_before = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()

        # Create storage class
        sc_name = self.create_storage_class()

        # Delete created BHV and BV as cleanup during failures
        self.addCleanup(self._cleanup_heketi_volumes, h_bhv_list_before)

        # Create PVCs
        for i in range(0, pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(self.node,
                                                   sc_name,
                                                   pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(openshift_ops.wait_for_resource_absence, self.node,
                            'pvc', pvc_name)
            self.addCleanup(openshift_ops.oc_delete,
                            self.node,
                            'pvc',
                            pvc_name,
                            raise_on_absence=False)

        # Wait for PVCs to be bound
        openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)

        # Get volume name list
        for pvc_name in pvc_names:
            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
            volume_name = openshift_ops.get_vol_names_from_pv(self.node,
                                                              pv_name,
                                                              vol_type='block')
            vol_details.append(volume_name)

        # Get BHV list after BV creation
        h_bhv_list_after = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()
        self.assertTrue(h_bhv_list_after, "Failed to get the BHV list")

        # Validate BV count
        self.validate_block_volumes_count(self.h_node, self.h_server,
                                          self.node_ip)

        # Collect pvs info and detach disks and collect pvs info
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(self.vm_name, self.node_hostname,
                                    self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Compare pvs info before and after
        for (path, uuid, vg_name), (_path, _uuid,
                                    _vg_name) in zip(pvs_info_before[:-1],
                                                     pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))

        # Delete created PVCs
        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)

        # Wait for pvc to get deleted
        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)

        # Get existing BHV list
        for bhv_name in h_bhv_list_after:
            b_list = block_libs.get_block_list(self.node_ip, volname=bhv_name)
            self.assertIsNotNone(
                b_list, "Failed to get gluster block list")
            # Flatten per-BHV lists so block volume names can be looked up
            gluster_block_list.extend(b_list)

        # Get list of block volumes using heketi
        h_blockvol_list = heketi_ops.heketi_blockvolume_list(self.h_node,
                                                             self.h_server,
                                                             json=True)

        # Validate volumes created are not present
        for vol in vol_details:
            self.assertNotIn(vol, gluster_block_list,
                             "Failed to delete volume {}".format(vol))
            self.assertNotIn(vol, h_blockvol_list['blockvolumes'],
                             "Failed to delete blockvolume '{}'".format(vol))
    def test_dev_path_file_volume_delete(self):
        """Validate device path name changes the deletion of
           already existing file volumes
        """

        pvc_size, pvc_amount = 2, 5
        vol_details, pvc_names = [], []

        # Create PVCs
        sc_name = self.create_storage_class()
        for i in range(0, pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(self.node,
                                                   sc_name,
                                                   pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(openshift_ops.wait_for_resource_absence, self.node,
                            'pvc', pvc_name)
            self.addCleanup(openshift_ops.oc_delete,
                            self.node,
                            'pvc',
                            pvc_name,
                            raise_on_absence=False)

        # Wait for PVCs to be bound
        openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)

        # Get Volumes name and validate volumes count
        for pvc_name in pvc_names:
            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
            volume_name = openshift_ops.get_vol_names_from_pv(
                self.node, pv_name)
            vol_details.append(volume_name)

        # Verify file volumes count
        self.validate_file_volumes_count(self.h_node, self.h_server,
                                         self.node_ip)

        # Collect pvs info and detach disks and get pvs info
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(self.vm_name, self.node_hostname,
                                    self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Compare pvs info before and after
        for (path, uuid, vg_name), (_path, _uuid,
                                    _vg_name) in zip(pvs_info_before[:-1],
                                                     pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))

        # Delete created PVCs
        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)

        # Wait for resource absence and get volume list
        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)
        vol_list = volume_ops.get_volume_list(self.node_ip)
        self.assertIsNotNone(vol_list, "Failed to get volumes list")

        # Validate volumes created are not present
        for vol in vol_details:
            self.assertNotIn(vol, vol_list,
                             "Failed to delete volume {}".format(vol))
    def create_sc_with_parameter(self, vol_type, success=False, parameter={}):
        """creates storage class, pvc and validates event

        Args:
            vol_type (str): storage type either gluster file or block
            success (bool): if True check for successfull else failure
                            for pvc creation event
            parameter (dict): dictionary with storage class parameters
        """
        if vol_type == "glusterfile":
            sc = self.storage_classes.get(
                'storage_class1',
                self.storage_classes.get('file_storage_class'))

            # Create secret file for usage in storage class
            self.secret_name = oc_create_secret(
                self.ocp_master_node[0],
                namespace=sc.get('secretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
            self.addCleanup(
                oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
            sc_parameter = {
                "secretnamespace": sc['secretnamespace'],
                "secretname": self.secret_name,
                "volumetype": "replicate:3"
            }
        elif vol_type == "glusterblock":
            sc = self.storage_classes.get(
                'storage_class2',
                self.storage_classes.get('block_storage_class'))

            # Create secret file for usage in storage class
            self.secret_name = oc_create_secret(
                self.ocp_master_node[0],
                namespace=sc.get('restsecretnamespace', 'default'),
                data_key=self.heketi_cli_key,
                secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
            self.addCleanup(
                oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
            sc_parameter = {
                "provisioner": "gluster.org/glusterblock",
                "restsecretnamespace": sc['restsecretnamespace'],
                "restsecretname": self.secret_name,
                "hacount": sc['hacount']
            }
        else:
            err_msg = "invalid vol_type %s" % vol_type
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        sc_parameter['resturl'] = sc['resturl']
        sc_parameter['restuser'] = sc['restuser']
        sc_parameter.update(parameter)

        # Create storage class
        self.sc_name = oc_create_sc(
            self.ocp_master_node[0], **sc_parameter)
        self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)

        # Create PVC
        self.pvc_name = oc_create_pvc(self.ocp_client[0], self.sc_name)
        self.addCleanup(
            wait_for_resource_absence, self.ocp_master_node[0],
            'pvc', self.pvc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0],
                        'pvc', self.pvc_name)

        # Wait for event with error
        event_reason = 'ProvisioningFailed'
        if success:
            event_reason = 'ProvisioningSucceeded'
        wait_for_events(self.ocp_master_node[0],
                        obj_name=self.pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_reason=event_reason)
    def create_and_wait_for_pvcs(
            self, pvc_size=1, pvc_name_prefix="autotests-pvc", pvc_amount=1,
            sc_name=None, timeout=600, wait_step=10, skip_waiting=False,
            skip_cleanup=False):
        """Create multiple PVC's not waiting for it

        Args:
            pvc_size (int): size of PVC, default value is 1
            pvc_name_prefix (str): volume prefix for each PVC, default value is
                                   'autotests-pvc'
            pvc_amount (int): number of PVC's, default value is 1
            sc_name (str): storage class to create PVC, default value is None,
                           which will cause automatic creation of sc.
            timeout (int): timeout time for waiting for PVC's to get bound
            wait_step (int): waiting time between each try of PVC status check
            skip_waiting (bool): boolean value which defines whether
                                 we need to wait for PVC creation or not.
        Returns:
            List: list of PVC names
        """
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class(skip_cleanup=skip_cleanup)

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
        if not skip_cleanup:
            self.addCleanup(
                wait_for_resources_absence, node, 'pvc', pvc_names)

        # Wait for PVCs to be in bound state
        try:
            if not skip_waiting:
                wait_for_pvcs_be_bound(node, pvc_names, timeout, wait_step)
        finally:
            if skip_cleanup:
                return pvc_names

            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    if not pv_name and skip_waiting:
                        continue
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)
        return pvc_names
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name)

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            app_1_pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod',
                        app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for the app POD to be up and running
        wait_for_pod_be_ready(self.node,
                              app_1_pod_name,
                              timeout=60,
                              wait_step=2)

        # Write data to the app POD
        write_data_cmd = ("dd if=/dev/urandom of=%s bs=1K count=100" %
                          datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(self.node,
                                               self.heketi_dc_name,
                                               timeout=10,
                                               wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(self.node,
                                       pvc_name_prefix="autotest-file2",
                                       sc_name=sc_name)
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                        app_2_pvc_name)
        self.addCleanup(oc_delete,
                        self.node,
                        'pvc',
                        app_2_pvc_name,
                        raise_on_absence=False)

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            app_2_pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod',
                        app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(self.node,
                                                   self.heketi_dc_name,
                                                   timeout=10,
                                                   wait_step=2)
        wait_for_pod_be_ready(self.node,
                              new_heketi_pod_name,
                              wait_step=5,
                              timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(self.node,
                              app_2_pod_name,
                              timeout=60,
                              wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name
        )

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for the app POD to be up and running
        wait_for_pod_be_ready(
            self.node, app_1_pod_name, timeout=60, wait_step=2)

        # Write data to the app POD
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
        )

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=60, wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Validate reuse of volume space after deletion of PVCs"""
        min_storage_gb = 10

        # Set arbiter:disabled tags to the first 2 nodes
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest("Devices are expected to have more than "
                                  "%sGb of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'required',
                revert_to=node_info.get('tags', {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created: pvc_size is the
        # smallest of the per-node biggest free disk spaces (KiB -> GiB), and
        # pvc_amount exceeds the per-node device count so that released space
        # has to be reused
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1

        # Create storage class with Gluster arbiter enabled
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = self.create_and_wait_for_pvcs(
            pvc_size=int(pvc_size / 3),
            pvc_name_prefix='arbiter-pvc',
            pvc_amount=3,
            sc_name=self.sc_name)

        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(
                self.node,
                self.sc_name,
                pvc_name_prefix='arbiter-pvc',
                pvc_size=pvc_size)
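            # If the PVC fails to bind, register cleanups for the orphaned
            # PVC before re-raising; on success it is deleted inline below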
            try:
                openshift_ops.verify_pvc_status_is_bound(
                    self.node, pvc_name, 300, 10)
            except Exception:
                self.addCleanup(openshift_ops.wait_for_resource_absence,
                                self.node, 'pvc', pvc_name)
                self.addCleanup(openshift_ops.oc_delete, self.node, 'pvc',
                                pvc_name)
                raise
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
            openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)
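
    # A small sketch (hypothetical helper, assuming the heketi_ops API used
    # above) that sums the free space reported for a node's devices. Sampling
    # it before and after the create/delete loop above is one way to confirm
    # that Gluster actually reused the released space.
    def _node_free_space_kib(self, node_id):
        node_info = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url, node_id,
            json=True)
        # 'free' is reported in KiB, matching the size math used above
        return sum(int(device['storage']['free'])
                   for device in node_info['devices'])
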
    def create_and_wait_for_pvcs(self,
                                 pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1,
                                 sc_name=None,
                                 timeout=120,
                                 wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(node,
                                     sc_name,
                                     pvc_name_prefix=pvc_name_prefix,
                                     pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

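            # Register cleanups for every PVC. With the 'Retain' reclaim
            # policy, deleting the PVC alone frees neither the PV nor the
            # backing Heketi (block) volume, so those are removed explicitly.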
            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete,
                                    node,
                                    'pv',
                                    pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(node, 'pv', custom,
                                                    pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete,
                                node,
                                'pvc',
                                pvc_name,
                                raise_on_absence=False)

        return pvc_names
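
    # Usage sketch (names and sizes are illustrative, not from the source):
    #
    #     pvc_names = self.create_and_wait_for_pvcs(
    #         pvc_size=2, pvc_name_prefix='example-pvc', pvc_amount=2,
    #         sc_name=self.sc_name)
    #
    # Cleanups registered above remove the PVCs (and, for 'Retain' PVs, the
    # PV and Heketi volume) when the test finishes.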