def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod."""

        # Get glusterblock provisioner dc name
        cmd = ("oc get dc | awk '{ print $1 }' | "
               "grep -e glusterblock -e provisioner")
        dc_name = command.cmd_run(cmd, self.ocp_master_node[0], True)

        # create heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # restart gluster-block-provisioner-pod
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # Get the new provisioner pod name and wait for it to be ready
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # create new heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url,
                                  vol_info['id'])
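The delete/wait/ready sequence above is the pod-restart pattern that recurs
throughout these examples. A minimal helper capturing it might look like the
following sketch (assuming the same openshift_ops helpers used on this page):

    def restart_pod_from_dc(ocp_node, dc_name):
        # Delete the current pod; the DC schedules a replacement automatically
        pod_name = get_pod_name_from_dc(ocp_node, dc_name)
        oc_delete(ocp_node, 'pod', pod_name)
        wait_for_resource_absence(ocp_node, 'pod', pod_name)

        # Wait for the replacement pod to become ready and return its name
        new_pod_name = get_pod_name_from_dc(ocp_node, dc_name)
        wait_for_pod_be_ready(ocp_node, new_pod_name)
        return new_pod_name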
Example #2
    def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod
        """

        # create heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5,
                                             json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # restart gluster-block-provisioner-pod
        dc_name = "glusterblock-%s-provisioner-dc" % self.storage_project_name
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # Get the new provisioner pod name and wait for it to be ready
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # create new heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2,
                                             json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_info['id'])
Example #3
    def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod."""

        # create heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # restart gluster-block-provisioner-pod
        dc_name = "glusterblock-%s-provisioner-dc" % self.storage_project_name
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # Get the new provisioner pod name and wait for it to be ready
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # create new heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url,
                                  vol_info['id'])
Example #4
    def test_heketi_logs_after_heketi_pod_restart(self):

        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        find_string_in_log = r"Started background pending operations cleaner"
        ocp_node = self.ocp_master_node[0]

        # Restart heketi pod
        heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
        oc_delete(ocp_node,
                  'pod',
                  heketi_pod_name,
                  collect_logs=self.heketi_logs_before_delete)
        self.addCleanup(self._heketi_pod_delete_cleanup)
        wait_for_resource_absence(ocp_node, 'pod', heketi_pod_name)
        heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
        wait_for_pod_be_ready(ocp_node, heketi_pod_name)
        self.assertTrue(hello_heketi(h_node, h_server),
                        "Heketi server {} is not alive".format(h_server))

        # Collect logs after heketi pod restart
        cmd = "oc logs {}".format(heketi_pod_name)
        out = cmd_run(cmd, hostname=ocp_node)

        # Validate string is present in heketi logs
        pending_check = re.compile(find_string_in_log)
        entry_list = pending_check.findall(out)
        self.assertTrue(entry_list,
                        "Failed to find entries in heketi logs")

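        # With a fixed-string pattern, every match equals the pattern itself,
        # so this loop is effectively a sanity check on the matches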
        for entry in entry_list:
            self.assertEqual(
                entry, find_string_in_log,
                "Failed to validate, Expected {}; Actual {}".format(
                    find_string_in_log, entry))
Example #5
    def test_pv_resize_with_prefix_for_name_and_size(
            self, create_vol_name_prefix=False, valid_size=True):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # A valid resize should expand the volume; an invalid one should
        # leave the size unchanged
        if valid_size:
            cmd = ("dd if=/dev/urandom of=%sfile2 "
                   "bs=100K count=10000") % dir_path
            with self.assertRaises(AssertionError):
                ret, out, err = oc_rsh(node, pod_name, cmd)
                msg = ("Command '%s' was expected to fail on '%s' node. "
                       "But it returned following: ret is '%s', err is '%s' "
                       "and out is '%s'" % (cmd, node, ret, err, out))
                raise ExecutionError(msg)
            pvc_size = 2
            resize_pvc(node, pvc_name, pvc_size)
            verify_pvc_size(node, pvc_name, pvc_size)
            verify_pv_size(node, pv_name, pvc_size)
        else:
            invalid_pvc_size = 'ten'
            with self.assertRaises(AssertionError):
                resize_pvc(node, pvc_name, invalid_pvc_size)
            verify_pvc_size(node, pvc_name, 1)
            verify_pv_size(node, pv_name, 1)

        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
Example #6
    def test_validate_logging_pods_and_pvc(self):
        """Validate logging pods and PVC"""

        # Wait for kibana pod to be ready
        kibana_pod = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_kibana_dc)
        openshift_ops.wait_for_pod_be_ready(self._master, kibana_pod)

        # Wait for fluentd pods to be ready
        fluentd_custom = [
            ":.status.desiredNumberScheduled",
            ":.spec.template.metadata.labels"
        ]
        count_and_selector = openshift_ops.oc_get_custom_resource(
            self._master, "ds", fluentd_custom, self._logging_fluentd_ds)
        selector = count_and_selector[1][4:].replace(":", "=")
        openshift_ops.wait_for_pods_be_ready(self._master,
                                             int(count_and_selector[0]),
                                             selector)

        # Wait for PVC to be bound and elasticsearch pod to be ready
        es_pod = openshift_ops.get_pod_name_from_dc(self._master,
                                                    self._logging_es_dc)
        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
        pvc_name = openshift_ops.oc_get_custom_resource(
            self._master, "pod", pvc_custom, es_pod)[0]
        openshift_ops.verify_pvc_status_is_bound(self._master, pvc_name)
        openshift_ops.wait_for_pod_be_ready(self._master, es_pod)

        # Validate iscsi and multipath
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            self._logging_es_dc,
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)
Example #7
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb = 1

        available_size_gb = self._available_disk_free_space()

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attached PVC to it
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            exceed_size = available_size_gb + 10

            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, exceed_size)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
Example #8
    def test_restart_heketi_pod(self):
        """Validate restarting heketi pod"""

        # create heketi volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1,
                                        json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        vol_info['id'],
                        raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete heketi-pod (it restarts the pod)
        oc_delete(self.ocp_master_node[0],
                  'pod',
                  heketi_pod_name,
                  collect_logs=self.heketi_logs_before_delete)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod',
                                  heketi_pod_name)

        # get new heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url)

        # compare the topology
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(
            new_topo_info, topo_info, "Topology info is not the same; "
            "difference - %s" % diff(topo_info, new_topo_info))

        # create new volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2,
                                        json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_info['id'])
Example #9
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'],
                self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        with self.assertRaises(AssertionError):
            ret, out, err = oc_rsh(node, pod_name, cmd)
            msg = ("Command '%s' was expected to fail on '%s' node. "
                   "But it returned following: ret is '%s', err is '%s' "
                   "and out is '%s'" % (cmd, node, ret, err, out))
            raise ExecutionError(msg)

        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
Example #10
    def _delete_and_wait_for_new_es_pod_to_come_up(self):

        # Force delete and wait for es pod to come up
        openshift_ops.switch_oc_project(
            self._master, self._logging_project_name)
        pod_name = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_es_dc)
        openshift_ops.oc_delete(self._master, 'pod', pod_name, is_force=True)
        openshift_ops.wait_for_resource_absence(self._master, 'pod', pod_name)
        new_pod_name = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_es_dc)
        openshift_ops.wait_for_pod_be_ready(
            self._master, new_pod_name, timeout=1800)
Example #11
    def test_restart_heketi_pod(self):
        """Validate restarting heketi pod"""

        # create heketi volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'], raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete heketi-pod (it restarts the pod)
        oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)
        wait_for_resource_absence(self.ocp_master_node[0],
                                  'pod', heketi_pod_name)

        # get new heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0],
                              heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url
        )

        # compare the topology
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(new_topo_info, topo_info, "topology info is not same,"
                         " difference - %s" % diff(topo_info, new_topo_info))

        # create new volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
        heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, vol_info['id'])
Example #12
    def _heketi_pod_delete_cleanup(self):
        """Cleanup for deletion of heketi pod using force delete"""
        try:
            # Fetch heketi pod after delete
            pod_name = openshift_ops.get_pod_name_from_dc(
                self.node, self.heketi_dc_name)
            openshift_ops.wait_for_pod_be_ready(self.node, pod_name, timeout=1)
        except exceptions.ExecutionError:

            # Force delete and wait for new pod to come up
            openshift_ops.oc_delete(self.node, 'pod', pod_name, is_force=True)
            openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)
            new_pod_name = openshift_ops.get_pod_name_from_dc(
                self.node, self.heketi_dc_name)
            openshift_ops.wait_for_pod_be_ready(self.node, new_pod_name)
Example #13
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertNotEqual(
            ret, 0, "This IO was expected to fail, but it succeeded: "
            "command %s on %s" % (cmd, node))
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
Example #14
    def validate_multipath_info(self, hacount):
        """validates multipath command on the pod node

        Args:
            hacount (int): hacount for which multipath to be checked
        """
        # create pod using pvc created
        dc_name = oc_create_app_dc_with_io(
            self.ocp_master_node[0], self.pvc_name
        )
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0], dc_name, 0
        )

        wait_for_pod_be_ready(
            self.ocp_master_node[0], pod_name, timeout=120, wait_step=3
        )

        # validates multipath for pod created with hacount
        self.assertTrue(
            validate_multipath_pod(self.ocp_master_node[0], pod_name, hacount),
            "multipath validation failed"
        )
Example #15
    def _create_dcs_and_check_brick_placement(self, prefix, sc_name,
                                              heketi_zone_checking,
                                              zone_count):
        app_pods, count, label = [], 5, "testlabel=autotest"

        # Create multiple PVCs using storage class
        pvc_names = self.create_and_wait_for_pvcs(pvc_name_prefix=prefix,
                                                  pvc_amount=count,
                                                  sc_name=sc_name)

        # Create app dcs with I/O
        for pvc_name in pvc_names:
            app_dc = openshift_ops.oc_create_app_dc_with_io(
                self.node,
                pvc_name=pvc_name,
                dc_name_prefix=prefix,
                image=self.io_container_image_cirros)
            self.addCleanup(openshift_ops.oc_delete, self.node, 'dc', app_dc)

            # Get pod names and label them
            pod_name = openshift_ops.get_pod_name_from_dc(self.node, app_dc)
            openshift_ops.oc_label(self.node, 'pod', pod_name, label)
            app_pods.append(pod_name)

        # Wait for pods to be ready with the help of label selector
        openshift_ops.wait_for_pods_be_ready(self.node, count, label)

        # Validate brick placement in heketi zones
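        # (the call below reuses the last pvc_name from the loop above)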
        self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
            heketi_zone_checking, pvc_name, zone_count)

        return app_pods
Example #16
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                    self.ocp_client[0], 'pv', custom, pv_name)

            # Check that the PV volume name is present in heketi and
            # follows the default "vol_<volume-id>" naming scheme
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketidb' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
Example #17
    def test_recreate_app_pod_with_attached_block_pv(self):
        """Validate app pod attached block device I/O after restart"""
        datafile_path = '/mnt/temporary_test_file'

        # Create DC with POD and attached PVC to it
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Write data
        write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
        self.cmd_run(write_cmd % (pod_name, datafile_path))

        # Recreate app POD
        scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
        scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
        new_pod_name = get_pod_name_from_dc(self.node, dc_name)

        # Check presence of already written file
        check_existing_file_cmd = ("oc exec %s -- ls %s" %
                                   (new_pod_name, datafile_path))
        out = self.cmd_run(check_existing_file_cmd)
        self.assertIn(datafile_path, out)

        # Perform I/O on the new POD
        self.cmd_run(write_cmd % (new_pod_name, datafile_path))
Example #18
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                self.ocp_client[0], 'pv', custom, pv_name)

            # Check that the PV volume name is present in heketi and
            # follows the default "vol_<volume-id>" naming scheme
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketidb' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
Example #19
    def test_recreate_app_pod_with_attached_block_pv(self):
        """Validate app pod attached block device I/O after restart"""
        datafile_path = '/mnt/temporary_test_file'

        # Create DC with POD and attached PVC to it
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Write data
        write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
        self.cmd_run(write_cmd % (pod_name, datafile_path))

        # Recreate app POD
        scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
        scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
        new_pod_name = get_pod_name_from_dc(self.node, dc_name)

        # Check presence of already written file
        check_existing_file_cmd = (
            "oc exec %s -- ls %s" % (new_pod_name, datafile_path))
        out = self.cmd_run(check_existing_file_cmd)
        self.assertIn(datafile_path, out)

        # Perform I/O on the new POD
        self.cmd_run(write_cmd % (new_pod_name, datafile_path))
Example #20
    def test_node_failure_pv_mounted(self):
        """Test node failure when PV is mounted with app pods running"""
        filepath = "/mnt/file_for_testing_volume.log"
        pvc_name = self.create_and_wait_for_pvc()

        dc_and_pod_names = self.create_dcs_with_pvc(pvc_name)
        dc_name, pod_name = dc_and_pod_names[pvc_name]

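        # Find the device backing /mnt inside the pod: the df source column,
        # expected to be of the form "<host>:<volume>"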
        mount_point = "df -kh /mnt -P | tail -1 | awk '{{print $1}}'"
        pod_cmd = "oc exec {} -- {}".format(pod_name, mount_point)
        hostname = command.cmd_run(pod_cmd, hostname=self.node)
        hostname = hostname.split(":")[0]

        vm_name = find_vm_name_by_ip_or_hostname(hostname)
        self.addCleanup(power_on_vm_by_name, vm_name)
        power_off_vm_by_name(vm_name)

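        # I/O on the mounted PV is still expected to succeed with the
        # backing node powered off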
        cmd = "dd if=/dev/urandom of={} bs=1K count=100".format(filepath)
        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(
            ret, "Failed to execute command {} on {} with error {}".format(
                cmd, self.node, err))

        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(
            ret, "Failed to execute command {} on {} with error {}".format(
                cmd, self.node, err))
Example #21
    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
Example #22
    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
Example #23
    def test_block_vol_offline_expand(self):
        """Test blockvol expansion while PVC is not in use"""
        node = self.ocp_master_node[0]

        pvc_name, dc_name, bvol_info = (
            self._block_vol_expand_common_offline_vs_online(False))

        # create and wait for job to be completed
        jobname = oc_create_offline_block_volume_expand_job(node, pvc_name)
        self.addCleanup(oc_delete, node, 'job', jobname)
        for w in waiter.Waiter(300, 5):
            if is_job_complete(node, jobname):
                break
        if w.expired:
            raise AssertionError(
                "block expand job {} is not completed".format(jobname))

        # verify expand size
        scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=1)
        pod_name = get_pod_name_from_dc(node, dc_name[0])
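        # Read the mounted filesystem size from inside the pod
        # (df -kh output, with the trailing "G" suffix stripped)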
        ret, size, _ = oc_rsh(
            node, pod_name,
            'df -kh /mnt | sed "/Filesystem/d" | awk \'{print $2}\' '
            '| sed "s/G//"')
        self.assertFalse(ret, "Failed to get size from client side")
        self.assertEqual(
            int(float(size)), bvol_info["size"], "new size is not "
            "reflected at mount point after block volume expand")
Example #24
    def _heketi_pod_delete_cleanup(self):
        """Cleanup for deletion of heketi pod using force delete"""
        try:
            pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                            self.heketi_dc_name)

            # Check whether the heketi pod is in ready state
            wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, timeout=1)
        except ExecutionError:
            # Force delete and wait for new pod to come up
            oc_delete(self.ocp_master_node[0], 'pod', pod_name, is_force=True)
            wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

            # Fetch heketi pod after force delete
            pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                            self.heketi_dc_name)
            wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
Example #25
    def test_brick_evict_on_more_than_three_node_with_one_down(self):
        """Test basic brick evict functionality and verify that brick evict
        succeeds when one node out of more than three nodes is down"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Create heketi volume
        vol_info = heketi_ops.heketi_volume_create(
            h_node, h_server, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete,
            h_node, h_server, vol_info.get('id'))

        # Get node on which heketi pod is scheduled
        heketi_pod = openshift_ops.get_pod_name_from_dc(
            self.ocp_client, self.heketi_dc_name)
        heketi_node = openshift_ops.oc_get_custom_resource(
            self.ocp_client, 'pod', '.:spec.nodeName', heketi_pod)[0]

        # Get brick id and glusterfs node which is not heketi node
        for node in vol_info.get('bricks', {}):
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server, node.get('node'), json=True)
            hostname = node_info.get('hostnames').get('manage')[0]
            if hostname != heketi_node:
                brick_id = node.get('id')
                break

        self._power_off_node_and_wait_node_to_be_not_ready(hostname)

        # Perform brick evict operation
        heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)

        # Get volume info after brick evict operation
        vol_info_new = heketi_ops.heketi_volume_info(
            h_node, h_server, vol_info.get('id'), json=True)

        # Get previous and new bricks from volume
        bricks_old = {brick.get('path') for brick in vol_info.get("bricks")}
        bricks_new = {
            brick.get('path') for brick in vol_info_new.get("bricks")}
        self.assertEqual(
            len(bricks_new - bricks_old), 1,
            "Brick was not replaced with brick evict for vol \n {}".format(
                vol_info_new))

        # Get gluster volume info
        g_vol_info = self._get_gluster_vol_info(vol_info_new.get('name'))

        # Validate bricks on gluster volume and heketi volume
        g_bricks = {
            brick.get('name').split(":")[1]
            for brick in g_vol_info.get("bricks", {}).get("brick")}
        self.assertEqual(
            bricks_new, g_bricks, "gluster vol info and heketi vol info "
            "mismatched after brick evict {} \n {}".format(
                g_bricks, g_vol_info))
Example #26
    def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
        dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
        self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_client[0], dc_name, 0)
        pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
        wait_for_pod_be_ready(self.ocp_client[0], pod_name,
                              timeout=timeout, wait_step=wait_step)
        return dc_name, pod_name
Example #27
    def test_brick_evict_on_three_node_with_one_down(self):
        """Test basic brick evict functionality and verify that brick evict
        fails when a node is down and only three nodes are available"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Disable node if more than 3
        node_list = heketi_ops.heketi_node_list(h_node, h_server)
        if len(node_list) > 3:
            for node_id in node_list[3:]:
                heketi_ops.heketi_node_disable(h_node, h_server, node_id)
                self.addCleanup(heketi_ops.heketi_node_enable, h_node,
                                h_server, node_id)

        # Create heketi volume
        vol_info = heketi_ops.heketi_volume_create(h_node,
                                                   h_server,
                                                   1,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_node, h_server,
                        vol_info.get('id'))

        # Get node on which heketi pod is scheduled
        heketi_pod = openshift_ops.get_pod_name_from_dc(
            self.ocp_client, self.heketi_dc_name)
        heketi_node = openshift_ops.oc_get_custom_resource(
            self.ocp_client, 'pod', '.:spec.nodeName', heketi_pod)[0]

        # Get the hostnames of the disabled nodes
        host_list = []
        for node_id in node_list[3:]:
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_server,
                                                    node_id,
                                                    json=True)
            host_list.append(node_info.get('hostnames').get('manage')[0])

        # Get brick id and glusterfs node which is not heketi node
        for node in vol_info.get('bricks', {}):
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_server,
                                                    node.get('node'),
                                                    json=True)
            hostname = node_info.get('hostnames').get('manage')[0]
            if (hostname != heketi_node) and (hostname not in host_list):
                brick_id = node.get('id')
                break

        self._power_off_node_and_wait_node_to_be_not_ready(hostname)

        # Perform brick evict operation
        try:
            heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)
        except AssertionError as e:
            if ('No Replacement was found' not in six.text_type(e)):
                raise
Example #28
    def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
        """Validate retain policy for gluster-block after PVC deletion"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)

        try:
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
            oc_delete(self.node, 'dc', dc_name)

        # get the name of volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)

        custom = [
            r':.metadata.annotations."gluster\.org\/volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]
        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        # check the reclaim policy of the PV
        self.assertEqual(reclaim_policy, 'Retain')

        # delete the pvc
        oc_delete(self.node, 'pvc', self.pvc_name)

        # check that the PVC has not been removed within the timeout
        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        # getting the blockvol list
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertIn(vol_id, blocklist)

        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_id)
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertNotIn(vol_id, blocklist)
        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
Example #29
    def setUp(self):
        """Deploy and verify the resources required by the test cases and
        register them for cleanup.
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2', self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # use a pvc size of 1 by default
        self.pvcsize = 1

        # create 10 pvcs by default
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces())

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation and its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(self.oc_node,
                                             self.dc_name,
                                             timeout=180,
                                             wait_step=3)
        wait_for_pod_be_ready(self.oc_node,
                              self.pod_name,
                              timeout=180,
                              wait_step=3)
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create pvc's to test
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(self.oc_node,
                                          self.sc_name,
                                          pvc_name_prefix=self.prefix,
                                          pvc_size=self.pvcsize)
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(wait_for_resource_absence,
                            self.oc_node,
                            "pvc",
                            test_pvc_name,
                            timeout=600,
                            interval=10)

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
Example #30
    def _respin_heketi_pod(self):
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        ocp_node = self.ocp_master_node[0]

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
        # delete heketi-pod (it restarts the pod)
        oc_delete(ocp_node,
                  "pod",
                  heketi_pod_name,
                  collect_logs=self.heketi_logs_before_delete)
        wait_for_resource_absence(ocp_node, "pod", heketi_pod_name)

        # get new heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
        wait_for_pod_be_ready(ocp_node, heketi_pod_name)

        # check heketi server is running
        err_msg = "Heketi server %s is not alive" % h_url
        self.assertTrue(hello_heketi(h_node, h_url), err_msg)
Example #31
    def create_dc_with_pvc(self, pvc_name, timeout=300, wait_step=10):
        dc_name = oc_create_app_dc_with_io(self.ocp_client[0], pvc_name)
        self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.ocp_client[0],
                        dc_name, 0)
        pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
        wait_for_pod_be_ready(self.ocp_client[0],
                              pod_name,
                              timeout=timeout,
                              wait_step=wait_step)
        return dc_name, pod_name
Example #32
    def create_dcs_with_pvc(self,
                            pvc_names,
                            timeout=600,
                            wait_step=5,
                            dc_name_prefix='autotests-dc',
                            label=None,
                            skip_cleanup=False):
        """Create bunch of DCs with app PODs which use unique PVCs.

        Args:
            pvc_names (str/set/list/tuple): List/set/tuple of PVC names
                to attach to app PODs as part of DCs.
            timeout (int): timeout value, default value is 600 seconds.
            wait_step (int): wait step, default value is 5 seconds.
            dc_name_prefix (str): name prefix for the deployment configs.
            label (dict): key-value pairs to add as labels on the DCs.
            skip_cleanup (bool): when True, do not register cleanup
                handlers for the created DCs.
        Returns: dictionary with following structure:
            {
                "pvc_name_1": ("dc_name_1", "pod_name_1"),
                "pvc_name_2": ("dc_name_2", "pod_name_2"),
                ...
                "pvc_name_n": ("dc_name_n", "pod_name_n"),
            }
        """
        if not isinstance(pvc_names, (list, set, tuple)):
            pvc_names = [pvc_names]
        dc_and_pod_names, dc_names = {}, {}
        for pvc_name in pvc_names:
            dc_name = oc_create_app_dc_with_io(self.ocp_client[0],
                                               pvc_name,
                                               dc_name_prefix=dc_name_prefix,
                                               label=label)
            dc_names[pvc_name] = dc_name
            if not skip_cleanup:
                self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
        if not skip_cleanup:
            self.addCleanup(scale_dcs_pod_amount_and_wait,
                            self.ocp_client[0],
                            dc_names.values(),
                            0,
                            timeout=timeout,
                            wait_step=wait_step)

        for pvc_name, dc_name in dc_names.items():
            pod_name = get_pod_name_from_dc(self.ocp_client[0], dc_name)
            dc_and_pod_names[pvc_name] = (dc_name, pod_name)
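        # Scale every DC up to one pod and wait for all pods to be ready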
        scale_dcs_pod_amount_and_wait(self.ocp_client[0],
                                      dc_names.values(),
                                      1,
                                      timeout=timeout,
                                      wait_step=wait_step)

        return dc_and_pod_names
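A usage sketch (assuming a test class that mixes in this helper together with
a create_and_wait_for_pvcs helper like the one used in Example #15):

    pvc_names = self.create_and_wait_for_pvcs(pvc_amount=3)
    dc_and_pod_names = self.create_dcs_with_pvc(pvc_names)
    for pvc_name, (dc_name, pod_name) in dc_and_pod_names.items():
        # Each PVC maps to the DC that consumes it and its ready app pod
        print(pvc_name, dc_name, pod_name)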
Example #33
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [
            r':.metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with an app POD and the PVC attached to it. Create
        # the DC and fetch the pod name before entering the try block so
        # the finally clause never references unassigned names.
        dc_name = oc_create_app_dc_with_io(
            self.node, self.pvc_name, image=self.io_container_image_cirros)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        try:
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def setUp(self):
        """Deploy and verify the resources required by the test cases and
        register them for cleanup.
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2',
            self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # default PVC size, in GB
        self.pvcsize = 1

        # number of PVCs to create for the tests
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces()
        )

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation and its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(
            self.oc_node, self.dc_name, timeout=180, wait_step=3
        )
        wait_for_pod_be_ready(
            self.oc_node, self.pod_name, timeout=180, wait_step=3
        )
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create the PVCs used by the tests
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(
                self.oc_node, self.sc_name,
                pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
            )
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(
                wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
                timeout=600, interval=10
            )

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
Example #35
    def test_dev_path_mapping_heketi_pod_reboot(self):
        """Validate dev path mapping for heketi pod reboot
        """
        self.node = self.ocp_master_node[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a file volume with an app pod, verify IO,
        # and compare path, uuid and vg_name
        pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()

        # Fetch heketi-pod name
        heketi_pod_name = openshift_ops.get_pod_name_from_dc(
            self.node, self.heketi_dc_name)

        # Respin the heketi pod by deleting it; its DC recreates it
        openshift_ops.oc_delete(self.node,
                                "pod",
                                heketi_pod_name,
                                collect_logs=self.heketi_logs_before_delete)
        self.addCleanup(self._heketi_pod_delete_cleanup)
        openshift_ops.wait_for_resource_absence(self.node, "pod",
                                                heketi_pod_name)

        # Fetch new heketi-pod name
        heketi_pod_name = openshift_ops.get_pod_name_from_dc(
            self.node, self.heketi_dc_name)
        openshift_ops.wait_for_pod_be_ready(self.node, heketi_pod_name)

        # Check heketi server is running
        self.assertTrue(heketi_ops.hello_heketi(h_node, h_url),
                        "Heketi server {} is not alive".format(h_url))

        # Check that IO is still running after the heketi pod respin
        use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
        self.assertNotEqual(
            use_percent, use_percent_after,
            "Failed to execute IO's in the app pod {} after respin".format(
                pod_name))
    def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
        """Validate retain policy for gluster-block after PVC deletion"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)

        try:
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
            oc_delete(self.node, 'dc', dc_name)

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)

        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']
        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        # check the reclaim policy of the PV
        self.assertEqual(reclaim_policy, 'Retain')

        # delete the pvc
        oc_delete(self.node, 'pvc', self.pvc_name)

        # expect the PVC to still be present within the timeout
        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        # get the block volume list
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertIn(vol_id, blocklist)

        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_id)
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertNotIn(vol_id, blocklist)
        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_storage_class_mandatory_params_glusterfile(self):
        """Validate storage-class creation with mandatory parameters"""

        # create secret
        self.secret_name = oc_create_secret(
            self.node,
            namespace=self.sc.get('secretnamespace', 'default'),
            data_key=self.heketi_cli_key,
            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
        self.addCleanup(
            oc_delete, self.node, 'secret', self.secret_name)

        # create storage class with mandatory parameters only
        sc_name = oc_create_sc(
            self.node, provisioner='kubernetes.io/glusterfs',
            resturl=self.sc['resturl'], restuser=self.sc['restuser'],
            secretnamespace=self.sc['secretnamespace'],
            secretname=self.secret_name
        )
        self.addCleanup(oc_delete, self.node, 'sc', sc_name)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

        # Create a DC with an app POD and the PVC attached to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_sc.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "ls -lrt %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "rm -rf %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
    def test_storage_class_mandatory_params_glusterfile(self):
        """Validate storage-class creation with mandatory parameters"""

        # create secret
        self.secret_name = oc_create_secret(
            self.node,
            namespace=self.sc.get('secretnamespace', 'default'),
            data_key=self.heketi_cli_key,
            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
        self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)

        # create storage class with mandatory parameters only
        sc_name = oc_create_sc(self.node,
                               provisioner='kubernetes.io/glusterfs',
                               resturl=self.sc['resturl'],
                               restuser=self.sc['restuser'],
                               secretnamespace=self.sc['secretnamespace'],
                               secretname=self.secret_name)
        self.addCleanup(oc_delete, self.node, 'sc', sc_name)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

        # Create a DC with an app POD and the PVC attached to it.
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_sc.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "ls -lrt %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "rm -rf %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
    def validate_multipath_info(self, hacount):
        """validates multipath command on the pod node

        Args:
            hacount (int): hacount for which multipath to be checked
        """
        # create pod using pvc created
        dc_name = oc_create_app_dc_with_io(
            self.ocp_master_node[0],
            self.pvc_name,
            image=self.io_container_image_cirros)
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
                        dc_name, 0)

        wait_for_pod_be_ready(self.ocp_master_node[0],
                              pod_name,
                              timeout=120,
                              wait_step=3)

        # Get pod info
        pod_info = oc_get_pods(self.ocp_master_node[0],
                               selector='deploymentconfig=%s' % dc_name)
        node = pod_info[pod_name]['node']

        # Find iqn from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.ocp_master_node[0], 'pv', custom,
                                        pv_name)[0]
        vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                           self.heketi_server_url,
                                           vol_id,
                                           json=True)
        iqn = vol_info['blockvolume']['iqn']

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn).keys()
        self.assertEqual(hacount, len(devices))

        # Validate mpath
        mpaths = set()
        for device in devices:
            mpaths.add(get_mpath_name_from_device_name(node, device))
        self.assertEqual(1, len(mpaths))
        validate_multipath_pod(self.ocp_master_node[0], pod_name, hacount,
                               list(mpaths)[0])
    def _get_es_pod_and_verify_iscsi_sessions(self):
        """Fetch es pod and verify iscsi sessions"""
        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"

        # Get the elasticsearch pod name and PVC name
        es_pod = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_es_dc)
        pvc_name = openshift_ops.oc_get_custom_resource(
            self._master, "pod", pvc_custom, es_pod)[0]

        # Validate iscsi and multipath
        self.verify_iscsi_sessions_and_multipath(
            pvc_name, self._logging_es_dc,
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)
        return es_pod, pvc_name
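
A hedged sketch of how a logging test might consume this helper; the respin pattern mirrors the heketi example above, and openshift_ops and self._master are used exactly as in the helper itself:

    def test_es_pod_iscsi_sessions_survive_respin(self):
        """Sketch: verify ES iscsi sessions, respin the pod, verify again."""
        es_pod, pvc_name = self._get_es_pod_and_verify_iscsi_sessions()

        # Respin the elasticsearch pod and re-run the same validation
        # against its successor.
        openshift_ops.oc_delete(self._master, 'pod', es_pod)
        openshift_ops.wait_for_resource_absence(self._master, 'pod', es_pod)
        self._get_es_pod_and_verify_iscsi_sessions()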
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with an app POD and the PVC attached to it. Create
        # the DC and fetch the pod name before entering the try block so
        # the finally clause never references unassigned names.
        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        try:
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        heketi_volume_delete(self.heketi_client_node,
                             self.heketi_server_url, vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def validate_multipath_info(self, hacount):
        """validates multipath command on the pod node

        Args:
            hacount (int): hacount for which multipath to be checked
        """
        # create pod using pvc created
        dc_name = oc_create_app_dc_with_io(
            self.ocp_master_node[0], self.pvc_name
        )
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0], dc_name, 0
        )

        wait_for_pod_be_ready(
            self.ocp_master_node[0], pod_name, timeout=120, wait_step=3
        )

        # Get pod info
        pod_info = oc_get_pods(
            self.ocp_master_node[0], selector='deploymentconfig=%s' % dc_name)
        node = pod_info[pod_name]['node']

        # Find iqn from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(
            self.ocp_master_node[0], 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn).keys()
        self.assertEqual(hacount, len(devices))

        # Validate mpath
        mpaths = set()
        for device in devices:
            mpaths.add(get_mpath_name_from_device_name(node, device))
        self.assertEqual(1, len(mpaths))
        validate_multipath_pod(
            self.ocp_master_node[0], pod_name, hacount, list(mpaths)[0])
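
A hedged usage sketch for this helper; the surrounding examples read hacount back from self.sc['hacount'], so passing it to the storage-class helper as a kwarg is an assumption:

    def test_multipath_matches_hacount(self):
        """Sketch: provision a block PVC and validate its multipath layout."""
        self.create_storage_class(hacount=3)  # hacount kwarg is assumed
        self.create_and_wait_for_pvc()
        self.validate_multipath_info(hacount=3)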
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
            self.heketi_dc_name, pod_amount=1)

        # verify that metrics are not accessible while the heketi pod is down
        with self.assertRaises(exceptions.ExecutionError):
            get_heketi_metrics(
                self.heketi_client_node,
                self.heketi_server_url,
                prometheus_format=True)

        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)

        pod_name = get_pod_name_from_dc(
            self.ocp_master_node[0], self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        for i in range(3):
            vol = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url, 1, json=True)

            self.assertTrue(vol)

            self.addCleanup(
                heketi_volume_delete,
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'],
                raise_on_error=False)

            vol_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        self.verify_heketi_metrics_with_topology_info()
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space, disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, heketi_url, node_id, json=True)
            if (node_info['state'].lower() != 'online' or
                    not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(
                    self.heketi_client_node, heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_node_enable,
                    self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if node_id in nodes or free_space < min_free_space:
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(
                        heketi_ops.heketi_device_enable,
                        self.heketi_client_node, heketi_url, device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            self.skipTest(
                "Could not find 3 online nodes, each with at least one "
                "online device having more than %dGb of free space." %
                min_free_space_gb)

        # Calculate the maximum size available for the PVC; heketi reports
        # device free space in KiB, hence the 1024**2 divisor to get GiB
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create a DC with an app POD and the PVC attached to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node, obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = (
                "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(
                self.node, obj_name=pvc_name,
                event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0, "Failed to write data on the expanded PVC")
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name
        )

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for the app POD to be up and running
        wait_for_pod_be_ready(
            self.node, app_1_pod_name, timeout=60, wait_step=2)

        # Write data to the app POD
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
        )

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to become ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=60, wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
        """Validate PVC with glusterblock creation when heketi pod is down"""
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class()
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)

        # Write test data
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        # Create second PVC
        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix='autotest-block2', sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name
        )

        # Create second app POD
        app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.node, app_2_dc_name, 0)
        app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)

        # Bring Heketi pod back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to become ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=150, wait_step=3)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def initiator_side_failures(self):
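        """Verify iscsi sessions, multipath layout and session logout when
        the app pod is respun on another node.
        """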

        # get storage IPs of the glusterfs pods
        keys = self.gluster_servers
        gluster_ips = []
        for key in keys:
            gluster_ips.append(self.gluster_servers_info[key]['storage'])
        gluster_ips.sort()

        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # find iqn and hacount from volume info
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']
        hacount = int(self.sc['hacount'])

        # create app pod
        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)

        # Verify iscsi logins, devices and mpaths twice: once on the
        # original node and once after the pod is respun on another node
        for i in range(2):

            # get node hostname from pod info
            pod_info = oc_get_pods(
                self.node, selector='deploymentconfig=%s' % dc_name)
            node = pod_info[pod_name]['node']

            # get the iscsi sessions info from the node
            iscsi = get_iscsi_session(node, iqn)
            self.assertEqual(hacount, len(iscsi))
            iscsi.sort()
            self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))

            # get the paths info from the node
            devices = get_iscsi_block_devices_by_path(node, iqn).keys()
            self.assertEqual(hacount, len(devices))

            # get mpath names and verify that only one mpath is there
            mpaths = set()
            for device in devices:
                mpaths.add(get_mpath_name_from_device_name(node, device))
            self.assertEqual(1, len(mpaths))

            validate_multipath_pod(
                self.node, pod_name, hacount, mpath=list(mpaths)[0])

            # The pod is respun only on the first pass; the second pass
            # just re-verifies the sessions on the new node
            if i == 1:
                break

            # make the node where the pod is running unschedulable
            oc_adm_manage_node(
                self.node, '--schedulable=false', nodes=[node])

            # re-enable scheduling on that node during cleanup
            self.addCleanup(
                oc_adm_manage_node, self.node, '--schedulable=true',
                nodes=[node])

            # delete the pod so it gets respun on some other node
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

            # wait for pod to come up
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # get the iscsi session from the previous node to verify logout
            iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
            self.assertFalse(iscsi)
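
A hedged sketch of a test entry point wrapping this helper; the test name below is illustrative, not taken from the source:

    def test_initiator_side_failures_on_pod_respin(self):
        """Sketch: run the initiator-side failure scenario end to end."""
        self.initiator_side_failures()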