Example #1
    def _check_heketi_pod_to_come_up_after_changing_env(self):
        # Wait for the old heketi pod to be terminated after the DC change
        heketi_pod = openshift_ops.get_pod_names_from_dc(
            self.oc_node, self.heketi_dc_name)[0]
        openshift_ops.wait_for_resource_absence(
            self.oc_node, "pod", heketi_pod)

        # Wait for the new heketi pod to come up and become ready
        new_heketi_pod = openshift_ops.get_pod_names_from_dc(
            self.oc_node, self.heketi_dc_name)[0]
        openshift_ops.wait_for_pod_be_ready(
            self.oc_node, new_heketi_pod, wait_step=20)
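A minimal caller sketch for the helper above, mirroring how Example #5 drives it: change an env var on the heketi DC, register cleanups to restore it, and wait for the heketi pod to be recycled. `ENV_NAME`, `ENV_VALUE` and `command.cmd_run` are the names used in Example #5; the method name and `value` parameter are placeholders.

    def _set_heketi_env_and_wait(self, value):
        # Hypothetical caller (names taken from Example #5): update the env
        # var on the heketi DC, restore it on cleanup, and wait for the
        # heketi pod to be recycled after each change.
        cmd = 'oc set env dc/{} {}={}'
        command.cmd_run(
            cmd.format(self.heketi_dc_name, ENV_NAME, value), self.oc_node)
        self.addCleanup(
            self._check_heketi_pod_to_come_up_after_changing_env)
        self.addCleanup(
            command.cmd_run,
            cmd.format(self.heketi_dc_name, ENV_NAME, ENV_VALUE),
            self.oc_node)
        self._check_heketi_pod_to_come_up_after_changing_env()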
    def test_validate_pvc_in_multiple_app_pods(self):
        """Validate the use of a same claim in multiple app pods"""
        replicas = 5

        # Create PVC
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

        # Create DC with application PODs
        dc_name = oc_create_app_dc_with_io(
            self.node,
            pvc_name,
            replicas=replicas,
            image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        # Wait for all the PODs to be ready
        pod_names = get_pod_names_from_dc(self.node, dc_name)
        self.assertEqual(replicas, len(pod_names))
        for pod_name in pod_names:
            wait_for_pod_be_ready(self.node, pod_name)

        # Create files in each of the PODs
        for pod_name in pod_names:
            self.cmd_run("oc exec {0} -- touch /mnt/temp_{0}".format(pod_name))

        # Check that all the created files are available at once
        ls_out = self.cmd_run("oc exec %s -- ls /mnt" % pod_names[0]).split()
        for pod_name in pod_names:
            self.assertIn("temp_%s" % pod_name, ls_out)
    def _check_heketi_and_gluster_pod_after_node_reboot(self, heketi_node):
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)
        heketi_pod = openshift_ops.get_pod_names_from_dc(
            self._master, self.heketi_dc_name)[0]

        # Wait for heketi pod to become ready and running
        openshift_ops.wait_for_pod_be_ready(self._master, heketi_pod)
        heketi_ops.hello_heketi(self._master, self.heketi_server_url)

        # Wait for glusterfs pods to become ready if hosted on same node
        heketi_node_ip = openshift_ops.oc_get_custom_resource(
            self._master, 'pod', '.:status.hostIP', heketi_pod)[0]
        if heketi_node_ip in self.gluster_servers:
            gluster_pod = openshift_ops.get_gluster_pod_name_for_specific_node(
                self._master, heketi_node)

            # Wait for glusterfs pod to become ready
            openshift_ops.wait_for_pod_be_ready(self._master, gluster_pod)
            services = (
                ("glusterd", "running"), ("gluster-blockd", "running"),
                ("tcmu-runner", "running"), ("gluster-block-target", "exited"))
            for service, state in services:
                openshift_ops.check_service_status_on_pod(
                    self._master, gluster_pod, service, "active", state)
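A short driver sketch showing where the checker above would typically be called. The reboot and node-ready steps are placeholders for whatever primitives the suite provides and are not taken from this example; only the final call reuses the helper defined above.

    def _reboot_node_and_verify_pods(self, heketi_node):
        # Hypothetical driver: the two helpers below are placeholders, only
        # the final call reuses the checker defined above.
        self._reboot_node(heketi_node)                # placeholder reboot step
        self._wait_for_node_to_be_ready(heketi_node)  # placeholder wait step
        self._check_heketi_and_gluster_pod_after_node_reboot(heketi_node)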
    def test_validate_pvc_in_multiple_app_pods(self):
        """Validate the use of a same claim in multiple app pods"""
        replicas = 5

        # Create PVC
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

        # Create DC with application PODs
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, replicas=replicas)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        # Wait for all the PODs to be ready
        pod_names = get_pod_names_from_dc(self.node, dc_name)
        self.assertEqual(replicas, len(pod_names))
        for pod_name in pod_names:
            wait_for_pod_be_ready(self.node, pod_name)

        # Create files in each of the PODs
        for pod_name in pod_names:
            self.cmd_run("oc exec {0} -- touch /mnt/temp_{0}".format(pod_name))

        # Check that all the created files are available at once
        ls_out = self.cmd_run("oc exec %s -- ls /mnt" % pod_names[0]).split()
        for pod_name in pod_names:
            self.assertIn("temp_%s" % pod_name, ls_out)
Example #5
    def test_lvm_script_with_wrapper_environment_value(self, env_var_value):
        """Validate the creation, deletion, etc operations when
        HEKETI_LVM_WRAPPER has different values assigned"""

        # Skip the TC if independent mode deployment
        if not self.is_containerized_gluster():
            self.skipTest(
                "Skipping this test as LVM script is not available in "
                "independent mode deployment")

        h_client, h_url = self.heketi_client_node, self.heketi_server_url

        # Set different values to HEKETI_LVM_WRAPPER
        if env_var_value != ENV_VALUE:
            cmd = 'oc set env dc/{} {}={}'
            command.cmd_run(
                cmd.format(self.heketi_dc_name, ENV_NAME, env_var_value),
                self.oc_node)
            self.addCleanup(
                self._check_heketi_pod_to_come_up_after_changing_env)
            self.addCleanup(
                command.cmd_run,
                cmd.format(self.heketi_dc_name, ENV_NAME, ENV_VALUE),
                self.oc_node)
            self._check_heketi_pod_to_come_up_after_changing_env()

        # Get new value associated with HEKETI_LVM_WRAPPER
        heketi_pod = openshift_ops.get_pod_names_from_dc(
            self.oc_node, self.heketi_dc_name)[0]
        custom = (
            "{{.spec.containers[*].env[?(@.name==\"{}\")].value}}".format(
                ENV_NAME))
        cmd = ("oc get pod {} -o=jsonpath='{}'".format(heketi_pod, custom))
        get_env_value = command.cmd_run(cmd, self.oc_node)

        # Validate new value assigned to heketi pod
        err_msg = "Failed to assign new value {} to {}".format(
            env_var_value, heketi_pod)
        self.assertEqual(get_env_value, env_var_value, err_msg)

        # Get the date before creating heketi volume
        cmd_date = "date -u '+%Y-%m-%d %T'"
        _date, _time = command.cmd_run(cmd_date, self.oc_node).split(" ")

        if env_var_value == ENV_FALSE_VALUE:
            # Heketi volume creation should fail when HEKETI_LVM_WRAPPER
            # is assigned /usr/bin/false
            err_msg = "Heketi volume was created unexpectedly"
            with self.assertRaises(AssertionError, msg=err_msg):
                vol_info = heketi_ops.heketi_volume_create(
                    h_client, h_url, self.volume_size, json=True)
                self.addCleanup(
                    heketi_ops.heketi_volume_delete, h_client,
                    h_url, vol_info["bricks"][0]["volume"])
        else:
            # Heketi volume creation should succeed when HEKETI_LVM_WRAPPER
            # is assigned any value other than /usr/bin/false
            vol_info = heketi_ops.heketi_volume_create(
                h_client, h_url, self.volume_size, json=True)
            self.addCleanup(
                heketi_ops.heketi_volume_delete,
                h_client, h_url, vol_info["bricks"][0]["volume"])
            self.assertTrue(vol_info, ("Failed to create heketi "
                            "volume of size {}".format(self.volume_size)))

        # Get heketi logs with specific time
        cmd_logs = "oc logs {} --since-time {}T{}Z | grep {}".format(
            heketi_pod, _date, _time, "/usr/sbin/lvm")

        # Validate that the assigned value of HEKETI_LVM_WRAPPER shows up
        # in the heketi logs
        for w in waiter.Waiter(60, 10):
            logs = command.cmd_run(cmd_logs, self.oc_node)
            status_match = re.search(env_var_value, logs)
            if status_match:
                break
        err_msg = "Heketi unable to execute LVM commands with {}".format(
            env_var_value)
        self.assertTrue(status_match, err_msg)
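Example #5 refers to `ENV_NAME`, `ENV_VALUE` and `ENV_FALSE_VALUE` and receives `env_var_value` as a parameter, which implies module-level constants and a ddt-style parametrization. A plausible setup is sketched below; apart from `HEKETI_LVM_WRAPPER` and `/usr/bin/false`, which the test's own docstring and comments mention, the constant values, class name, base class and data values are assumptions, not taken from the snippet.

import ddt

from openshiftstoragelibs import baseclass

# Assumed constants for Example #5; only HEKETI_LVM_WRAPPER and
# /usr/bin/false are confirmed by the test above, the other value is
# illustrative.
ENV_NAME = "HEKETI_LVM_WRAPPER"
ENV_VALUE = "/usr/sbin/exec-on-host"
ENV_FALSE_VALUE = "/usr/bin/false"


@ddt.ddt
class TestHeketiLvmWrapper(baseclass.BaseClass):  # assumed class/base names

    @ddt.data(ENV_VALUE, ENV_FALSE_VALUE)  # illustrative parameter values
    def test_lvm_script_with_wrapper_environment_value(self, env_var_value):
        ...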