Example No. 1
    def test_validate_logging_pods_and_pvc(self):
        """Validate logging pods and PVC"""

        # Wait for kibana pod to be ready
        kibana_pod = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_kibana_dc)
        openshift_ops.wait_for_pod_be_ready(self._master, kibana_pod)

        # Wait for fluentd pods to be ready
        fluentd_custom = [
            ":.status.desiredNumberScheduled",
            ":.spec.template.metadata.labels"
        ]
        count_and_selector = openshift_ops.oc_get_custom_resource(
            self._master, "ds", fluentd_custom, self._logging_fluentd_ds)
        selector = count_and_selector[1][4:].replace(":", "=")
        openshift_ops.wait_for_pods_be_ready(self._master,
                                             int(count_and_selector[0]),
                                             selector)

        # Wait for PVC to be bound and elasticsearch pod to be ready
        es_pod = openshift_ops.get_pod_name_from_dc(self._master,
                                                    self._logging_es_dc)
        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
        pvc_name = openshift_ops.oc_get_custom_resource(
            self._master, "pod", pvc_custom, es_pod)[0]
        openshift_ops.verify_pvc_status_is_bound(self._master, pvc_name)
        openshift_ops.wait_for_pod_be_ready(self._master, es_pod)

        # Validate iscsi and multipath
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            self._logging_es_dc,
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)
Example No. 2
    def configure_node_to_run_gluster_pod(self, storage_hostname):
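        # Build iptables rules for the TCP ports required by the gluster pod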
        ports = ("24010", "3260", "111", "2222", "24007", "24008",
                 "49152:49664")
        iptables_rule_pattern = (
            "-p tcp -m state --state NEW -m %s --%s %s -j ACCEPT")
        iptables_rule_chain = "OS_FIREWALL_ALLOW"
        iptables_rules = []
        for port in ports:
            if ":" in port:
                iptables_rules.append(iptables_rule_pattern %
                                      ("multiport", "dports", port))
            else:
                iptables_rules.append(iptables_rule_pattern %
                                      ("tcp", "dport", port))
        node_add_iptables_rules(storage_hostname, iptables_rule_chain,
                                iptables_rules)
        self.addCleanup(node_delete_iptables_rules, storage_hostname,
                        iptables_rule_chain, iptables_rules)

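        # Label the node as a gluster storage host so that a gluster pod gets
        # scheduled on it; on cleanup remove the label and wait for the pod
        # count to return to the original number of gluster servers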
        gluster_host_label = "glusterfs=storage-host"
        gluster_pod_label = "glusterfs=storage-pod"
        oc_label(self.ocp_client[0], "node", storage_hostname,
                 gluster_host_label)
        self.addCleanup(wait_for_pods_be_ready,
                        self.ocp_client[0],
                        len(self.gluster_servers),
                        selector=gluster_pod_label)
        self.addCleanup(oc_label, self.ocp_client[0], "node", storage_hostname,
                        "glusterfs-")

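        # Wait for the additional gluster pod to come up on the newly
        # labelled node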
        wait_for_pods_be_ready(self.ocp_client[0],
                               len(self.gluster_servers) + 1,
                               selector=gluster_pod_label)
Example No. 3
    def _create_dcs_and_check_brick_placement(self, prefix, sc_name,
                                              heketi_zone_checking,
                                              zone_count):
        app_pods, count, label = [], 5, "testlabel=autotest"

        # Create multiple PVCs using storage class
        pvc_names = self.create_and_wait_for_pvcs(pvc_name_prefix=prefix,
                                                  pvc_amount=count,
                                                  sc_name=sc_name)

        # Create app dcs with I/O
        for pvc_name in pvc_names:
            app_dc = openshift_ops.oc_create_app_dc_with_io(
                self.node,
                pvc_name=pvc_name,
                dc_name_prefix=prefix,
                image=self.io_container_image_cirros)
            self.addCleanup(openshift_ops.oc_delete, self.node, 'dc', app_dc)

            # Get pod names and label them
            pod_name = openshift_ops.get_pod_name_from_dc(self.node, app_dc)
            openshift_ops.oc_label(self.node, 'pod', pod_name, label)
            app_pods.append(pod_name)

        # Wait for pods to be ready with the help of label selector
        openshift_ops.wait_for_pods_be_ready(self.node, count, label)

        # Validate brick placement in heketi zones
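        # Note: pvc_name here refers to the last PVC created in the loop above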
        self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
            heketi_zone_checking, pvc_name, zone_count)

        return app_pods
Example No. 4
    def test_respin_prometheus_pod(self, motive="delete"):
        """Validate respin of prometheus pod"""
        if motive == 'drain':

            # Get the number of infra nodes
            infra_node_count_cmd = (
                'oc get nodes '
                '--no-headers -l node-role.kubernetes.io/infra=true|wc -l')
            infra_node_count = command.cmd_run(
                infra_node_count_cmd, self._master)

            # Skip the test case if the number of infra nodes is less than 2
            if int(infra_node_count) < 2:
                self.skipTest('Available number of infra nodes "{}", it should'
                              ' be more than 1'.format(infra_node_count))

        # Get PVC names and pod names
        pod_names, pvc_names = self._get_pod_names_and_pvc_names()

        # Validate iscsi and multipath
        for (pvc_name, pod_name) in zip(pvc_names, pod_names):
            _, _, node = self.verify_iscsi_sessions_and_multipath(
                pvc_name, pod_name[0], rtype='pod',
                heketi_server_url=self._registry_heketi_server_url,
                is_registry_gluster=True)

        # Delete the prometheus pods
        if motive == 'delete':
            for pod_name in pod_names:
                openshift_ops.oc_delete(self._master, 'pod', pod_name[0])

        # Drain the node
        elif motive == 'drain':
            drain_cmd = ('oc adm drain {} --force=true --ignore-daemonsets '
                         '--delete-local-data'.format(node))
            command.cmd_run(drain_cmd, hostname=self._master)

            # Cleanup to make node schedulable
            cmd_schedule = (
                'oc adm manage-node {} --schedulable=true'.format(node))
            self.addCleanup(
                command.cmd_run, cmd_schedule, hostname=self._master)

        # Validate that there are no pods in a non-running state
        field_selector, pod_count = "status.phase!=Running", 0
        openshift_ops.wait_for_pods_be_ready(
            self._master, pod_count, field_selector=field_selector)

        # Validate iscsi and multipath
        for (pvc_name, pod_name) in zip(pvc_names, pod_names):
            self.verify_iscsi_sessions_and_multipath(
                pvc_name, pod_name[0], rtype='pod',
                heketi_server_url=self._registry_heketi_server_url,
                is_registry_gluster=True)

        # Try to fetch metric from prometheus pod
        self._fetch_metric_from_promtheus_pod(metric='kube_node_info')
Example No. 5
    def _wait_for_gluster_pod_be_ready(self, g_pod_list_before):
        """Wait for the gluster pods to be in ready state"""
        openshift_ops.switch_oc_project(
            self._master, self._registry_project_name)

        # Check if the gluster pods are in ready state
        try:
            pod_count = len(self._registry_servers_info.keys())
            openshift_ops.wait_for_pods_be_ready(
                self._master, pod_count, "glusterfs-node=pod",
                timeout=120, wait_step=6)
        except exceptions.ExecutionError:
            self._guster_pod_delete(g_pod_list_before)
Example No. 6
    def test_docker_service_restart(self):
        """Validate docker service should not fail after restart"""

        # Skip the TC if independent mode deployment
        if not self.is_containerized_gluster():
            self.skipTest(
                "Skipping this test case as LVM script is not available in "
                "independent mode deployment")

        # Skip the TC if the docker storage driver is not devicemapper
        pod_host_ip = self.pod_name[0]["pod_host_ip"]
        cmd = "docker info -f '{{json .Driver}}'"
        device_driver = command.cmd_run(cmd, pod_host_ip)
        if device_driver != '"devicemapper"':
            self.skipTest(
                "Skipping this test case as docker storage driver is not "
                "set to devicemapper")

        # Validate LVM environment is present
        custom = (r'":spec.containers[*].env[?(@.name==\"{}\")]'
                  r'.value"'.format(ENV_NAME))
        env_var_value = openshift_ops.oc_get_custom_resource(
            self.oc_node, "pod", custom, self.h_pod_name)[0]
        err_msg = "Heketi {} environment should has {}".format(
            ENV_NAME, ENV_VALUE)
        self.assertEqual(env_var_value, ENV_VALUE, err_msg)

        # Check docker status is active
        command.cmd_run(DOCKER_SERVICE.format("is-active"), pod_host_ip)

        # Restart the docker service
        self.addCleanup(self._check_docker_status_is_active, pod_host_ip)
        command.cmd_run(DOCKER_SERVICE.format("restart"), pod_host_ip)

        # Wait for docker service to become active
        self._wait_for_docker_service_status(pod_host_ip, "active", "running")

        # Wait for glusterfs pods to be ready
        openshift_ops.wait_for_pods_be_ready(self.oc_node,
                                             len(self.gluster_servers),
                                             "glusterfs=storage-pod")

        # Check the docker pool is available after docker restart
        cmd = "ls -lrt /dev/docker-vg/docker-pool"
        command.cmd_run(cmd, pod_host_ip)

        # Create PVC after docker restart
        self.create_and_wait_for_pvcs()
Example No. 7
    def test_verify_metrics_data_during_gluster_pod_respin(self):
        # Add check for CRS version
        switch_oc_project(self.master, self.registry_project_name)
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS version check "
                          "can not be implemented")

        # Verify multipath and iscsi for cassandra pod
        switch_oc_project(self.master, self.metrics_project_name)
        hawkular_cassandra, pvc_name, iqn, _, node = (
            self.verify_cassandra_pod_multipath_and_iscsi())

        # Get the ip of active path
        device_and_ip = get_iscsi_block_devices_by_path(node, iqn)
        mpath = get_mpath_name_from_device_name(node,
                                                list(device_and_ip.keys())[0])
        active_passive_dict = get_active_and_enabled_devices_from_mpath(
            node, mpath)
        node_ip = device_and_ip[active_passive_dict['active'][0]]

        # Get the name of gluster pod from the ip
        switch_oc_project(self.master, self.registry_project_name)
        gluster_pods = get_ocp_gluster_pod_details(self.master)
        pod_name = list(
            filter(lambda pod: (pod["pod_host_ip"] == node_ip),
                   gluster_pods))[0]["pod_name"]
        err_msg = "Failed to get the gluster pod name {} with active path"
        self.assertTrue(pod_name, err_msg.format(pod_name))

        # Delete the pod
        oc_delete(self.master, 'pod', pod_name)
        wait_for_resource_absence(self.master, 'pod', pod_name)

        # Wait for new pod to come up
        pod_count = len(self.registry_servers_info.keys())
        selector = "glusterfs-node=pod"
        wait_for_pods_be_ready(self.master, pod_count, selector)

        # Validate cassandra pod state, multipath and iscsi
        switch_oc_project(self.master, self.metrics_project_name)
        wait_for_pod_be_ready(self.master, hawkular_cassandra, timeout=2)
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            self.metrics_rc_hawkular_cassandra,
            rtype='rc',
            heketi_server_url=self.registry_heketi_server_url,
            is_registry_gluster=True)
Example No. 8
    def test_promethoues_pods_and_pvcs(self):
        """Validate prometheus pods and PVC"""
        # Wait for PVCs to be bound
        pod_names, pvc_names = self._get_pod_names_and_pvc_names()
        openshift_ops.wait_for_pvcs_be_bound(self._master, pvc_names)

        # Validate that there are no pods in a non-running state
        field_selector, pod_count = "status.phase!=Running", 0
        openshift_ops.wait_for_pods_be_ready(
            self._master, pod_count, field_selector=field_selector)

        # Validate iscsi and multipath
        for (pvc_name, pod_name) in zip(pvc_names, pod_names):
            self.verify_iscsi_sessions_and_multipath(
                pvc_name, pod_name[0], rtype='pod',
                heketi_server_url=self._registry_heketi_server_url,
                is_registry_gluster=True)

        # Try to fetch metric from prometheus pod
        self._fetch_metric_from_promtheus_pod(metric='kube_node_info')