def validate_multipath_pod(hostname, podname, hacount, mpath):
    """Validate multipath for given app-pod.

     Args:
         hostname (str): ocp master node name
         podname (str): app-pod name for which we need to validate
                        multipath. ex : nginx1
         hacount (int): multipath count or HA count. ex: 3
         mpath (str): multipath value to check
     Returns:
         bool: True if successful, otherwise raises exception
    """

    cmd = "oc get pods -o wide | grep %s | awk '{print $7}'" % podname
    pod_nodename = cmd_run(cmd, hostname)

    active_node_count, enable_node_count = (1, hacount - 1)
    cmd = "multipath -ll %s | grep 'status=active' | wc -l" % mpath
    active_count = int(cmd_run(cmd, pod_nodename))
    assert active_node_count == active_count, (
        "Active node count on %s for %s is %s and not 1" % (
            pod_nodename, podname, active_count))

    cmd = "multipath -ll %s | grep 'status=enabled' | wc -l" % mpath
    enable_count = int(cmd_run(cmd, pod_nodename))
    assert enable_node_count == enable_count, (
        "Passive node count on %s for %s is %s and not %s" % (
            pod_nodename, podname, enable_count, enable_node_count))

    g.log.info("Validation of multipath for %s is successfull" % podname)
    return True
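# Illustrative usage sketch (not part of the original source): the hostname,
# pod name and mpath name below are placeholders; in the tests shown further
# down, the mpath name is normally derived from the iscsi/multipath helpers.
def _example_validate_multipath(master="master.example.com"):
    return validate_multipath_pod(master, "nginx1", hacount=3, mpath="mpatha")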
Example #2
def _get_openshift_storage_version_str(hostname=None):
    """Gets OpenShift Storage version from gluster pod's buildinfo directory.

    Args:
        hostname (str): Node on which the ocp command should run.
    Returns:
        str : Openshift Storage version, e.g. '3.11.3'
    Raises: 'NotImplementedError' if CRS setup is provided.
    """
    if not hostname:
        hostname = list(g.config['ocp_servers']['client'].keys())[0]
    get_gluster_pod_cmd = (
        "oc get --no-headers=true pods --selector glusterfs-node=pod "
        "-o=custom-columns=:.metadata.name | tail -1")
    gluster_pod = command.cmd_run(get_gluster_pod_cmd, hostname)
    if not gluster_pod:
        raise NotImplementedError(
            "OCS version check cannot be done on the standalone setup.")

    buildinfo_cmd = (
        "oc rsh %s "
        "find . -name \"Dockerfile-rhgs3-rhgs-server-rhel7*\" "
        r"-exec awk '/%s/{print $0}' {} \; "
        "| tail -1" %
        (gluster_pod, BUILDS_LABEL_TAG_REGEX))
    out = command.cmd_run(buildinfo_cmd, hostname)

    build_tag_match = re.search(BUILDS_LABEL_TAG_REGEX, out)
    if not build_tag_match:
        error_msg = "Unexpected BUILD LABEL tag expression: '%s'" % out
        g.log.error(error_msg)
        raise exceptions.ExecutionError(error_msg)

    return (build_tag_match.group(2)).strip()
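# Hedged usage sketch (hypothetical helper, not in the original source):
# compares the version string returned above against a minimum release.
# parse_version from pkg_resources is assumed to be acceptable here, as it
# already appears in other examples below.
def _example_ocs_version_is_at_least(minimum="3.11.3", hostname=None):
    from pkg_resources import parse_version
    current = _get_openshift_storage_version_str(hostname=hostname)
    return parse_version(current) >= parse_version(minimum)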
Example #3
    def _set_zone_check_env_in_heketi_dc(self, heketi_zone_checking):
        set_env = (
            'HEKETI_POST_REQUEST_VOLUME_OPTIONS="user.heketi.zone-checking'
            ' {}"').format(heketi_zone_checking)
        unset_env, e_list = "HEKETI_POST_REQUEST_VOLUME_OPTIONS-", "--list"
        env = set_env.replace('"', '')

        # Check if zone checking env is already set, then do nothing
        cmd_list_env = ("oc set env dc/{} {}".format(self.heketi_dc_name,
                                                     e_list))
        env_list = command.cmd_run(cmd_list_env, hostname=self.node)
        if env in env_list:
            return

        # Set zone checking env option inside heketi dc
        cmd_set_env = ("oc set env dc/{} {}".format(self.heketi_dc_name,
                                                    set_env))
        cmd_unset_env = ("oc set env dc/{} {}".format(self.heketi_dc_name,
                                                      unset_env))
        command.cmd_run(cmd_set_env, hostname=self.node)
        self._check_heketi_pod_to_come_up_after_changing_env()
        self.addCleanup(self._check_heketi_pod_to_come_up_after_changing_env)
        self.addCleanup(command.cmd_run, cmd_unset_env, hostname=self.node)

        # List all envs and validate if env is set successfully
        new_env_list = command.cmd_run(cmd_list_env, hostname=self.node)
        self.assertIn(env, new_env_list, "Failed to set env {}".format(env))
Example #5
    def _check_docker_status_is_active(self, pod_host_ip):
        try:
            command.cmd_run(DOCKER_SERVICE.format("is-active"), pod_host_ip)
        except Exception as err:
            if "inactive" in str(err):
                command.cmd_run(DOCKER_SERVICE.format("start"), pod_host_ip)
                self._wait_for_docker_service_status(
                    pod_host_ip, "active", "running")
    def configure_node_to_run_gluster_node(self, storage_hostname):
        glusterd_status_cmd = "systemctl is-active glusterd"
        command.cmd_run(glusterd_status_cmd, storage_hostname)

        ports = ("24010", "3260", "111", "22", "24007", "24008", "49152-49664")
        add_port = " ".join(["--add-port=%s/tcp" % port for port in ports])
        add_firewall_rule_cmd = "firewall-cmd --zone=public %s" % add_port
        command.cmd_run(add_firewall_rule_cmd, storage_hostname)
    def test_respin_prometheus_pod(self, motive="delete"):
        """Validate respin of prometheus pod"""
        if motive == 'drain':

            # Get the number of infra nodes
            infra_node_count_cmd = (
                'oc get nodes '
                '--no-headers -l node-role.kubernetes.io/infra=true|wc -l')
            infra_node_count = command.cmd_run(
                infra_node_count_cmd, self._master)

            # Skip test case if the number of infra nodes is less than 2
            if int(infra_node_count) < 2:
                self.skipTest('Available number of infra nodes "{}", it should'
                              ' be more than 1'.format(infra_node_count))

        # Get PVC names and pod names
        pod_names, pvc_names = self._get_pod_names_and_pvc_names()

        # Validate iscsi and multipath
        for (pvc_name, pod_name) in zip(pvc_names, pod_names):
            _, _, node = self.verify_iscsi_sessions_and_multipath(
                pvc_name, pod_name[0], rtype='pod',
                heketi_server_url=self._registry_heketi_server_url,
                is_registry_gluster=True)

        # Delete the prometheus pods
        if motive == 'delete':
            for pod_name in pod_names:
                openshift_ops.oc_delete(self._master, 'pod', pod_name[0])

        # Drain the node
        elif motive == 'drain':
            drain_cmd = ('oc adm drain {} --force=true --ignore-daemonsets '
                         '--delete-local-data'.format(node))
            command.cmd_run(drain_cmd, hostname=self._master)

            # Cleanup to make node schedulable
            cmd_schedule = (
                'oc adm manage-node {} --schedulable=true'.format(node))
            self.addCleanup(
                command.cmd_run, cmd_schedule, hostname=self._master)

        # Validate that there are no pods in a non-running state
        field_selector, pod_count = "status.phase!=Running", 0
        openshift_ops.wait_for_pods_be_ready(
            self._master, pod_count, field_selector=field_selector)

        # Validate iscsi and multipath
        for (pvc_name, pod_name) in zip(pvc_names, pod_names):
            self.verify_iscsi_sessions_and_multipath(
                pvc_name, pod_name[0], rtype='pod',
                heketi_server_url=self._registry_heketi_server_url,
                is_registry_gluster=True)

        # Try to fetch metric from prometheus pod
        self._fetch_metric_from_promtheus_pod(metric='kube_node_info')
Example #8
    def test_metrics_during_cassandra_pod_respin(self, motive='delete'):
        """Validate cassandra pod respin"""
        old_cassandra_pod, pvc_name, _, _, node = (
            self.verify_cassandra_pod_multipath_and_iscsi())

        if motive == 'delete':
            # Delete the cassandra pod
            oc_delete(self.master, 'pod', old_cassandra_pod)
            self.addCleanup(self.cassandra_pod_delete_cleanup)
        elif motive == 'drain':
            # Get the number of infra nodes
            infra_node_count_cmd = (
                'oc get nodes '
                '--no-headers -l node-role.kubernetes.io/infra=true|wc -l')
            infra_node_count = command.cmd_run(infra_node_count_cmd,
                                               self.master)

            # Skip test case if the number of infra nodes is less than 2
            if int(infra_node_count) < 2:
                self.skipTest('Available number of infra nodes "{}", it should'
                              ' be more than 1'.format(infra_node_count))

            # Drain the node
            drain_cmd = ('oc adm drain {} --force=true --ignore-daemonsets '
                         '--delete-local-data'.format(node))
            command.cmd_run(drain_cmd, hostname=self.master)

            # Cleanup to make node schedulable
            cmd_schedule = (
                'oc adm manage-node {} --schedulable=true'.format(node))
            self.addCleanup(command.cmd_run,
                            cmd_schedule,
                            hostname=self.master)

        # Wait for pod to get absent
        wait_for_resource_absence(self.master, 'pod', old_cassandra_pod)

        # Wait for new pod to come up
        new_cassandra_pod = get_pod_name_from_rc(
            self.master, self.metrics_rc_hawkular_cassandra)
        wait_for_pod_be_ready(self.master, new_cassandra_pod)

        # Validate iscsi and multipath
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            self.metrics_rc_hawkular_cassandra,
            rtype='rc',
            heketi_server_url=self.registry_heketi_server_url,
            is_registry_gluster=True)
def get_iscsi_block_devices_by_path(node, iqn=None, raise_on_error=True):
    """Get list of iscsiadm block devices from path.

    Args:
        node (str): where we want to run the command.
        iqn (str): name of iqn.
    Returns:
        dict: block devices and their IPs.
    Raises:
        ExecutionError: In case of any failure if raise_on_error=True.
    """
    cmd = "set -o pipefail && ((ls --format=context /dev/disk/by-path/ip*"
    if iqn:
        cmd += " | grep %s" % iqn
    cmd += ") | awk -F '/|:|-' '{print $10,$25}')"

    out = cmd_run(cmd, node, raise_on_error=raise_on_error)

    if not out:
        return out

    out_dic = {}
    for i in out.split("\n"):
        ip, device = i.strip().split(" ")
        out_dic[device] = ip

    return out_dic
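# Illustrative sketch (not in the original source): the IQN below is a
# placeholder. On a real initiator node, the returned dict maps block device
# names (e.g. 'sdb') to the target portal IPs they were discovered from.
def _example_log_iscsi_devices(node, iqn="iqn.2016-12.org.gluster-block:example"):
    devices = get_iscsi_block_devices_by_path(node, iqn=iqn)
    for device, ip in (devices or {}).items():
        g.log.info("block device %s is backed by target portal %s" % (
            device, ip))
    return devices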
    def test_initiator_side_failures_initiator_and_target_on_different_node(
            self):

        nodes = oc_get_schedulable_nodes(self.node)

        # get list of all gluster nodes
        cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
               "-o=custom-columns=:.spec.nodeName")
        g_nodes = cmd_run(cmd, self.node)
        g_nodes = g_nodes.split('\n') if g_nodes else g_nodes

        # skip test case if required schedulable node count not met
        if len(set(nodes) - set(g_nodes)) < 2:
            self.skipTest("skipping test case because it needs at least two"
                          " nodes schedulable")

        # make containerized Gluster nodes unschedulable
        if g_nodes:
            # make gluster nodes unschedulable
            oc_adm_manage_node(
                self.node, '--schedulable=false',
                nodes=g_nodes)

            # make gluster nodes schedulable
            self.addCleanup(
                oc_adm_manage_node, self.node, '--schedulable=true',
                nodes=g_nodes)

        self.initiator_side_failures()
    def test_glusterblock_logs_presence_verification(self):
        """Validate presence of glusterblock provisioner POD and it's status"""

        # Get glusterblock provisioner dc name
        cmd = ("oc get dc | awk '{ print $1 }' | "
               "grep -e glusterblock -e provisioner")
        dc_name = cmd_run(cmd, self.ocp_master_node[0], True)

        # Get glusterblock provisioner pod name and it's status
        gb_prov_name, gb_prov_status = oc_get_custom_resource(
            self.node, 'pod', custom=':.metadata.name,:.status.phase',
            selector='deploymentconfig=%s' % dc_name)[0]
        self.assertEqual(gb_prov_status, 'Running')

        # Create Secret, SC and PVC
        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # Get list of Gluster nodes
        g_hosts = list(g.config.get("gluster_servers", {}).keys())
        self.assertGreater(
            len(g_hosts), 0,
            "We expect, at least, one Gluster Node/POD:\n %s" % g_hosts)

        # Perform checks on Gluster nodes/PODs
        logs = ("gluster-block-configshell", "gluster-blockd")

        gluster_pods = oc_get_pods(
            self.ocp_client[0], selector="glusterfs-node=pod")
        cmd = "tail -n 5 /var/log/glusterfs/gluster-block/%s.log"
        for g_host in g_hosts:
            for log in logs:
                out = cmd_run_on_gluster_pod_or_node(
                    self.ocp_client[0], cmd % log, gluster_node=g_host)
                self.assertTrue(out, "Command '%s' output is empty." % cmd)
Example #12
    def test_heketi_logs_after_heketi_pod_restart(self):

        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        find_string_in_log = r"Started background pending operations cleaner"
        ocp_node = self.ocp_master_node[0]

        # Restart heketi pod
        heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
        oc_delete(ocp_node,
                  'pod',
                  heketi_pod_name,
                  collect_logs=self.heketi_logs_before_delete)
        self.addCleanup(self._heketi_pod_delete_cleanup)
        wait_for_resource_absence(ocp_node, 'pod', heketi_pod_name)
        heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
        wait_for_pod_be_ready(ocp_node, heketi_pod_name)
        self.assertTrue(hello_heketi(h_node, h_server),
                        "Heketi server {} is not alive".format(h_server))

        # Collect logs after heketi pod restart
        cmd = "oc logs {}".format(heketi_pod_name)
        out = cmd_run(cmd, hostname=ocp_node)

        # Validate string is present in heketi logs
        pending_check = re.compile(find_string_in_log)
        entry_list = pending_check.findall(out)
        self.assertTrue(entry_list,
                        "Failed to find entries in heketi logs")

        for entry in entry_list:
            self.assertEqual(
                entry, find_string_in_log,
                "Failed to validate, Expected {}; Actual {}".format(
                    find_string_in_log, entry))
Example #15
    def setUp(self):
        """Initialize all the variables necessary for test cases."""
        super(TestMetricsAndGlusterRegistryValidation, self).setUp()

        try:
            metrics_config = g.config['openshift']['metrics']
            self.metrics_project_name = metrics_config['metrics_project_name']
            self.metrics_rc_hawkular_cassandra = (
                metrics_config['metrics_rc_hawkular_cassandra'])
            self.metrics_rc_hawkular_metrics = (
                metrics_config['metrics_rc_hawkular_metrics'])
            self.metrics_rc_heapster = metrics_config['metrics_rc_heapster']
            self.registry_heketi_server_url = (
                g.config['openshift']['registry_heketi_config']
                ['heketi_server_url'])
            self.registry_project_name = (
                g.config['openshift']['registry_project_name'])
            self.registry_servers_info = g.config['gluster_registry_servers']
        except KeyError as err:
            msg = "Config file doesn't have key {}".format(err)
            g.log.error(msg)
            self.skipTest(msg)

        self.master = self.ocp_master_node[0]
        cmd = "oc project --short=true"
        current_project = command.cmd_run(cmd, self.master)
        switch_oc_project(self.master, self.metrics_project_name)
        self.addCleanup(switch_oc_project, self.master, current_project)
    def test_initiator_side_failures_initiator_and_target_on_same_node(self):
        # Note: This test case is supported for containerized gluster only.

        nodes = oc_get_schedulable_nodes(self.node)

        # get list of all gluster nodes
        cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
               "-o=custom-columns=:.spec.nodeName")
        g_nodes = cmd_run(cmd, self.node)
        g_nodes = g_nodes.split('\n') if g_nodes else g_nodes

        # get the list of nodes other than gluster
        o_nodes = list((set(nodes) - set(g_nodes)))

        # skip the test case if it is crs setup
        if not g_nodes:
            self.skipTest("skipping test case because it is not a "
                          "containerized gluster setup. "
                          "This test case is for containerized gluster only.")

        # make other nodes unschedulable
        oc_adm_manage_node(self.node, '--schedulable=false', nodes=o_nodes)

        # make other nodes schedulable
        self.addCleanup(oc_adm_manage_node,
                        self.node,
                        '--schedulable=true',
                        nodes=o_nodes)

        self.initiator_side_failures()
    def test_node_failure_pv_mounted(self):
        """Test node failure when PV is mounted with app pods running"""
        filepath = "/mnt/file_for_testing_volume.log"
        pvc_name = self.create_and_wait_for_pvc()

        dc_and_pod_names = self.create_dcs_with_pvc(pvc_name)
        dc_name, pod_name = dc_and_pod_names[pvc_name]

        mount_point = "df -kh /mnt -P | tail -1 | awk '{{print $1}}'"
        pod_cmd = "oc exec {} -- {}".format(pod_name, mount_point)
        hostname = command.cmd_run(pod_cmd, hostname=self.node)
        hostname = hostname.split(":")[0]

        vm_name = find_vm_name_by_ip_or_hostname(hostname)
        self.addCleanup(power_on_vm_by_name, vm_name)
        power_off_vm_by_name(vm_name)

        cmd = "dd if=/dev/urandom of={} bs=1K count=100".format(filepath)
        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(
            ret, "Failed to execute command {} on {} with error {}".format(
                cmd, self.node, err))

        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(
            ret, "Failed to execute command {} on {} with error {}".format(
                cmd, self.node, err))
    def test_restart_gluster_block_provisioner_pod(self):
        """Restart gluster-block provisioner pod."""

        # Get glusterblock provisioner dc name
        cmd = ("oc get dc | awk '{ print $1 }' | "
               "grep -e glusterblock -e provisioner")
        dc_name = command.cmd_run(cmd, self.ocp_master_node[0], True)

        # create heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=5, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 5")
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, vol_info['id'])

        # restart gluster-block-provisioner-pod
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)

        # new gluster-pod name
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)

        # create new heketi block volume
        vol_info = heketi_blockvolume_create(self.heketi_client_node,
                                             self.heketi_server_url,
                                             size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi block"
                        "volume of size 2")
        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url,
                                  vol_info['id'])
Example #20
    def _wait_for_docker_service_status(self, pod_host_ip, status, state):
        for w in waiter.Waiter(30, 3):
            out = command.cmd_run(DOCKER_SERVICE.format("status"), pod_host_ip)
            for line in out.splitlines():
                status_match = re.search(SERVICE_STATUS_REGEX, line)
                if (status_match and status_match.group(1) == status
                        and status_match.group(2) == state):
                    return True
Example #21
def get_active_and_enabled_devices_from_mpath(node, mpath):
    """Get active and enabled devices from mpath name.

    Args:
        node (str): where we want to run the command.
        mpath (str): name of mpath for which we have to find devices.
    Returns:
        dictionary: devices info
    Raises:
        ExecutionError: In case of any failure
    """

    cmd = ("set -o pipefail && ((multipath -ll %s | grep -A 1 status=%s)"
           " | cut -d ':' -f 4 | awk '{print $2}')")

    active = cmd_run(cmd % (mpath, 'active'), node).split('\n')[1::2]
    enabled = cmd_run(cmd % (mpath, 'enabled'), node).split('\n')[1::2]

    out_dic = {'active': active, 'enabled': enabled}
    return out_dic
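# Illustrative sketch (not in the original source): cross-checks the device
# lists returned above against an expected HA count, mirroring the
# active/enabled split asserted in validate_multipath_pod() earlier.
def _example_check_mpath_device_counts(node, mpath, hacount):
    devices = get_active_and_enabled_devices_from_mpath(node, mpath)
    assert len(devices['active']) == 1, devices
    assert len(devices['enabled']) == hacount - 1, devices
    return devices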
Example #22
def _get_heketi_server_version_str(ocp_client_node=None):
    """Gets Heketi server package version from Heketi POD.

    Args:
        ocp_client_node (str): Node on which the version check command should
                               run.
    Returns:
        str : heketi version, e.g. '7.0.0-1'
    Raises: 'exceptions.ExecutionError' if failed to get version
    """
    if not ocp_client_node:
        ocp_client_node = list(g.config["ocp_servers"]["client"].keys())[0]
    get_package_version_cmd = (
        "rpm -q heketi --queryformat '%{version}-%{release}\n' | "
        "cut -d '.' -f 1,2,3")

    # NOTE(vponomar): we implement Heketi POD call command here, not in common
    # module for OC commands just to avoid cross-reference imports.
    get_pods_cmd = "oc get -o wide --no-headers=true pods --selector heketi"
    heketi_pods = command.cmd_run(get_pods_cmd, hostname=ocp_client_node)

    err_msg = ""
    for heketi_pod_line in heketi_pods.split("\n"):
        heketi_pod_data = heketi_pod_line.split()
        if ("-deploy" in heketi_pod_data[0]
                or heketi_pod_data[1].lower() != "1/1"
                or heketi_pod_data[2].lower() != "running"):
            continue
        try:
            pod_cmd = "oc exec %s -- %s" % (
                heketi_pod_data[0], get_package_version_cmd)
            return command.cmd_run(pod_cmd, hostname=ocp_client_node)
        except Exception as e:
            err = ("Failed to run '%s' command on '%s' Heketi POD. "
                   "Error: %s\n" % (pod_cmd, heketi_pod_data[0], e))
            err_msg += err
            g.log.error(err)
    if not err_msg:
        err_msg += "Haven't found 'Running' and 'ready' (1/1) Heketi PODs.\n"
    err_msg += "Heketi PODs: %s" % heketi_pods
    raise exceptions.ExecutionError(err_msg)
Example #23
def node_delete_iptables_rules(node, chain, rules, raise_on_error=True):
    """Delete iptables rules

    Args:
        node (str): Node on which iptables rules should be deleted.
        chain (str): iptables chain from which rule(s) need to be deleted.
        rules (str|tuple|list): Rule(s) which need(s) to be deleted from
                                a chain.
    Returns:
        None
    Raises:
        AssertionError: In case command fails to execute and
                        raise_on_error is set to True
    """
    rules = [rules] if isinstance(rules, six.string_types) else rules

    delete_iptables_rule_cmd = "iptables --delete %s %s"
    for rule in rules:
        command.cmd_run(
            delete_iptables_rule_cmd % (chain, rule), node,
            raise_on_error=raise_on_error)
Example #24
    def test_heketi_volume_mount(self):
        self.node = self.ocp_master_node[0]
        try:
            cmd_run('rpm -q glusterfs-fuse', self.node)
        except AssertionError:
            self.skipTest("gluster-fuse package is not present on Node %s" %
                          self.node)

        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create volume
        vol_info = heketi_volume_create(h_node, h_url, 2, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])

        mount_point = vol_info['mount']['glusterfs']['device']
        mount_dir = '/mnt/dir-%s' % utils.get_random_str()
        mount_cmd = 'mount -t glusterfs %s %s' % (mount_point, mount_dir)

        # Create directory to mount volume
        cmd_run('mkdir %s' % mount_dir, self.node)
        self.addCleanup(cmd_run, 'rm -rf %s' % mount_dir, self.node)

        # Mount volume
        cmd_run(mount_cmd, self.node)
        self.addCleanup(cmd_run, 'umount %s' % mount_dir, self.node)

        # Run I/O to make sure Mount point works
        _file = 'file'
        run_io_cmd = ('dd if=/dev/urandom of=%s/%s bs=4k count=1000' %
                      (mount_dir, _file))

        # Verify size of volume
        cmd_run(run_io_cmd, self.node)
        size = cmd_run('df -kh --output=size %s | tail -1' % mount_dir,
                       self.node).strip()
        self.assertEqual('2.0G', size)

        # Verify file on gluster vol bricks
        for brick in vol_info['bricks']:
            node_id = brick['node']
            node_info = heketi_node_info(h_node, h_url, node_id, json=True)
            brick_host = node_info['hostnames']['storage'][0]
            cmd_run_on_gluster_pod_or_node(self.node,
                                           'ls %s/%s' % (brick['path'], _file),
                                           brick_host)
Example #26
    def _get_space_use_percent_in_app_pod(self, pod_name):
        """Check if IO's are running in the app pod"""

        use_percent = []
        cmd = "oc exec {} -- df -h /mnt | tail -1"

        # Run 10 times to track the percentage used
        for _ in range(10):
            out = command.cmd_run(cmd.format(pod_name), self.node).split()[3]
            self.assertTrue(
                out, "Failed to fetch mount point details from the pod "
                "{}".format(pod_name))
            use_percent.append(out[:-1])
        return use_percent
def validate_multipath_pod(hostname, podname, hacount, mpath):
    """Validate multipath for given app-pod.

     Args:
         hostname (str): ocp master node name
         podname (str): app-pod name for which we need to validate
                        multipath. ex : nginx1
         hacount (int): multipath count or HA count. ex: 3
         mpath (str): multipath value to check
     Returns:
         bool: True if successful, otherwise raises exception
    """

    pod_nodename_list = oc_get_custom_resource(hostname,
                                               'pod',
                                               custom=':.spec.nodeName',
                                               name=podname)
    if not pod_nodename_list:
        raise ExecutionError(
            "Failed to get ip for pod from hostname {}".format(hostname))

    pod_nodename = pod_nodename_list[0]
    active_node_count, enable_node_count = (1, hacount - 1)
    cmd = "multipath -ll %s | grep 'status=active' | wc -l" % mpath
    active_count = int(cmd_run(cmd, pod_nodename))
    assert active_node_count == active_count, (
        "Active node count on %s for %s is %s and not 1" %
        (pod_nodename, podname, active_count))

    cmd = "multipath -ll %s | grep 'status=enabled' | wc -l" % mpath
    enable_count = int(cmd_run(cmd, pod_nodename))
    assert enable_node_count == enable_count, (
        "Passive node count on %s for %s is %s and not %s" %
        (pod_nodename, podname, enable_count, enable_node_count))

    g.log.info("Validation of multipath for %s is successfull" % podname)
    return True
def get_mpath_name_from_device_name(node, device, raise_on_error=True):
    """Get name of mpath device form block device

    Args:
        node (str): where we want to run the command.
        device (str): for which we have to find mpath.
    Returns:
        str: name of the mpath device
    Raises:
        ExecutionError: In case of any failure if raise_on_error=True.
    """
    cmd = ("set -o pipefail && ((lsblk -n --list --output=NAME /dev/%s)"
           " | tail -1)" % device)

    return cmd_run(cmd, node, raise_on_error=raise_on_error)
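# Illustrative sketch (not in the original source): chains the iscsi and
# multipath helpers from these examples to resolve the mpath device behind a
# block volume; the IQN argument is a placeholder supplied by the caller.
def _example_mpath_for_iqn(node, iqn):
    devices = get_iscsi_block_devices_by_path(node, iqn=iqn)
    assert devices, "no iscsi block devices found for IQN %s" % iqn
    first_device = list(devices.keys())[0]
    return get_mpath_name_from_device_name(node, first_device)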
Example #29
def node_add_iptables_rules(node, chain, rules, raise_on_error=True):
    """Append iptables rules

    Args:
        node (str): Node on which iptables rules should be added.
        chain (str): iptables chain in which rule(s) need to be appended.
        rules (str|tuple|list): Rule(s) which need(s) to be added to a chain.
    Returns:
        None
    Raises:
        AssertionError: In case command fails to execute and
                        raise_on_error is set to True
    """
    rules = [rules] if isinstance(rules, six.string_types) else rules

    add_iptables_rule_cmd = "iptables --append %s %s"
    check_iptables_rule_cmd = "iptables --check %s %s"
    for rule in rules:
        try:
            command.cmd_run(check_iptables_rule_cmd % (chain, rule), node)
        except AssertionError:
            command.cmd_run(
                add_iptables_rule_cmd % (chain, rule), node,
                raise_on_error=raise_on_error)
    def verify_all_paths_are_up_in_multipath(
            self, mpath_name, hacount, node, timeout=30, interval=5):
        for w in Waiter(timeout, interval):
            out = command.cmd_run('multipath -ll %s' % mpath_name, node)
            count = 0
            for line in out.split('\n'):
                if 'active ready running' in line:
                    count += 1
            if hacount == count:
                break
        msg = "Up paths are not equal to hacount %s in mpath %s on Node %s" % (
            hacount, out, node)
        self.assertEqual(hacount, count, msg)
        for state in ['failed', 'faulty', 'undef']:
            msg = "All paths are not up in mpath %s on Node %s" % (out, node)
            self.assertNotIn(state, out, msg)
Example #32
    def test_docker_service_restart(self):
        """Validate docker service should not fail after restart"""

        # Skip the TC if independent mode deployment
        if not self.is_containerized_gluster():
            self.skipTest(
                "Skipping this test case as LVM script is not available in "
                "independent mode deployment")

        # Skip the TC if docker storage driver other than devicemapper
        pod_host_ip = self.pod_name[0]["pod_host_ip"]
        cmd = "docker info -f '{{json .Driver}}'"
        device_driver = command.cmd_run(cmd, pod_host_ip)
        if device_driver != '"devicemapper"':
            self.skipTest(
                "Skipping this test case as docker storage driver is not "
                "set to devicemapper")

        # Validate LVM environment is present
        custom = (r'":spec.containers[*].env[?(@.name==\"{}\")]'
                  r'.value"'.format(ENV_NAME))
        env_var_value = openshift_ops.oc_get_custom_resource(
            self.oc_node, "pod", custom, self.h_pod_name)[0]
        err_msg = "Heketi {} environment should has {}".format(
            ENV_NAME, ENV_VALUE)
        self.assertEqual(env_var_value, ENV_VALUE, err_msg)

        # Check docker status is active
        command.cmd_run(DOCKER_SERVICE.format("is-active"), pod_host_ip)

        # Restart the docker service
        self.addCleanup(self._check_docker_status_is_active, pod_host_ip)
        command.cmd_run(DOCKER_SERVICE.format("restart"), pod_host_ip)

        # Wait for docker service to become active
        self._wait_for_docker_service_status(pod_host_ip, "active", "running")

        # Wait for glusterfs pods to be ready
        openshift_ops.wait_for_pods_be_ready(self.oc_node,
                                             len(self.gluster_servers),
                                             "glusterfs=storage-pod")

        # Check the docker pool is available after docker restart
        cmd = "ls -lrt /dev/docker-vg/docker-pool"
        command.cmd_run(cmd, pod_host_ip)

        # Create PVC after docker restart
        self.create_and_wait_for_pvcs()
    def setUp(self):
        """Initialize all the variables which are necessary for test cases"""
        super(TestPrometheusAndGlusterRegistryValidation, self).setUp()

        try:
            prometheus_config = g.config['openshift']['prometheus']
            self._prometheus_project_name = prometheus_config[
                'prometheus_project_name']
            self._prometheus_resources_selector = prometheus_config[
                'prometheus_resources_selector']
            self._alertmanager_resources_selector = prometheus_config[
                'alertmanager_resources_selector']
            self._registry_heketi_server_url = (
                g.config['openshift']['registry_heketi_config'][
                    'heketi_server_url'])
            self._registry_project_name = (
                g.config['openshift']['registry_project_name'])
            self._registry_servers_info = (
                g.config['gluster_registry_servers'])
        except KeyError as err:
            self.skipTest("Config file doesn't have key {}".format(err))

        # Skip the test if iscsi-initiator-utils version is not the expected
        cmd = ("rpm -q iscsi-initiator-utils "
               "--queryformat '%{version}-%{release}\n'"
               "| cut -d '.' -f 1,2,3,4")
        e_pkg_version = "6.2.0.874-17"
        for g_server in self.gluster_servers:
            out = self.cmd_run(cmd, g_server)
            if parse_version(out) < parse_version(e_pkg_version):
                self.skipTest(
                    "Skip the test as iscsi-initiator-utils package version "
                    "{} on node {} is less than the expected version {}, for "
                    "more info refer to BZ-1624670".format(
                        out, g_server, e_pkg_version))

        self._master = self.ocp_master_node[0]

        # Switch to namespace containing prometheus pods
        cmd = "oc project --short=true"
        current_project = command.cmd_run(cmd, self._master)
        openshift_ops.switch_oc_project(
            self._master, self._prometheus_project_name)
        self.addCleanup(
            openshift_ops.switch_oc_project, self._master, current_project)
Example #34
    def test_lvm_script_executable_on_host(self):
        """Validate lvm script is executable on host instead
           of container"""

        # Skip the TC if independent mode deployment
        if not self.is_containerized_gluster():
            self.skipTest(
                "Skipping this test as LVM script is not available in "
                "independent mode deployment")

        pod_name = self.pod_name[0]['pod_name']
        gluster_pod_label = "glusterfs=storage-pod"

        # Remove LVM binaries to validate that the /usr/sbin/exec-on-host
        # script executes LVM commands on the host instead of in the pod
        cmd = "rm /usr/sbin/lvm"
        ret, _, err = openshift_ops.oc_rsh(self.oc_node, pod_name, cmd)
        self.addCleanup(
            openshift_ops.wait_for_pods_be_ready, self.oc_node,
            len(self.gluster_servers), gluster_pod_label)
        self.addCleanup(
            openshift_ops.wait_for_resource_absence, self.oc_node, "pod",
            pod_name)
        self.addCleanup(
            openshift_ops.oc_delete, self.oc_node, "pod", pod_name)
        err_msg = (
            "failed to execute command {} on pod {} with error: {}"
            "".format(cmd, pod_name, err))
        self.assertFalse(ret, err_msg)

        # Validate LVM command is not executable in pod
        cmd = "oc rsh {} lvs".format(pod_name)
        stdout = command.cmd_run(cmd, self.oc_node, raise_on_error=False)
        self.assertIn(
            'exec: \\"lvs\\": executable file not found in $PATH', stdout)

        # Run LVM command with /usr/sbin/exec-on-host
        cmd = "{} lvs".format(ENV_VALUE)
        ret, out, err = openshift_ops.oc_rsh(self.oc_node, pod_name, cmd)
        err_msg = (
            "failed to execute command {} on pod {} with error: {}"
            "".format(cmd, pod_name, err))
        self.assertFalse(ret, err_msg)
        self.assertIn("VG", out)
Example #35
    def setUp(self):
        """Initialize all the variables necessary for test cases."""
        super(TestLoggingAndGlusterRegistryValidation, self).setUp()

        try:
            logging_config = g.config['openshift']['logging']
            self._logging_project_name = logging_config['logging_project_name']
            self._logging_fluentd_ds = logging_config['logging_fluentd_ds']
            self._logging_es_dc = logging_config['logging_es_dc']
            self._logging_kibana_dc = logging_config['logging_kibana_dc']
            self._registry_heketi_server_url = (
                g.config['openshift']['registry_heketi_config']
                ['heketi_server_url'])
            self._registry_project_name = (
                g.config['openshift']['registry_project_name'])
            self._registry_servers_info = g.config['gluster_registry_servers']
        except KeyError as err:
            msg = "Config file doesn't have key {}".format(err)
            g.log.error(msg)
            self.skipTest(msg)

        # Skip the test if iscsi-initiator-utils version is not the expected
        cmd = ("rpm -q iscsi-initiator-utils "
               "--queryformat '%{version}-%{release}\n'"
               "| cut -d '.' -f 1,2,3,4")
        e_pkg_version = "6.2.0.874-17"
        for g_server in self.gluster_servers:
            out = self.cmd_run(cmd, g_server)
            if parse_version(out) < parse_version(e_pkg_version):
                msg = ("Skip test since isci initiator utils version actual: "
                       "{out} is less than expected: {ver} on node {server},"
                       " for more info refer to BZ-1624670".format(
                           out=out, ver=e_pkg_version, server=g_server))
                g.log.error(msg)
                self.skipTest(msg)

        self._master = self.ocp_master_node[0]
        cmd = "oc project --short=true"
        current_project = command.cmd_run(cmd, self._master)
        openshift_ops.switch_oc_project(self._master,
                                        self._logging_project_name)
        self.addCleanup(openshift_ops.switch_oc_project, self._master,
                        current_project)
def get_iscsi_session(node, iqn=None, raise_on_error=True):
    """Get the list of ip's of iscsi sessions.

    Args:
        node (str): where we want to run the command.
        iqn (str): name of iqn.
    Returns:
        list: list of session IPs.
    Raises:
        ExecutionError: In case of any failure if raise_on_error=True.
    """

    cmd = "set -o pipefail && ((iscsiadm -m session"
    if iqn:
        cmd += " | grep %s" % iqn
    cmd += ") | awk '{print $3}' | cut -d ':' -f 1)"

    out = cmd_run(cmd, node, raise_on_error=raise_on_error)

    return out.split("\n") if out else out
    def test_glusterblock_logs_presence_verification(self):
        """Validate presence of glusterblock provisioner POD and it's status"""

        # Get glusterblock provisioner dc name
        cmd = ("oc get dc | awk '{ print $1 }' | "
               "grep -e glusterblock -e provisioner")
        dc_name = cmd_run(cmd, self.ocp_master_node[0], True)

        # Get glusterblock provisioner pod name and it's status
        gb_prov_name, gb_prov_status = oc_get_custom_resource(
            self.node, 'pod', custom=':.metadata.name,:.status.phase',
            selector='deploymentconfig=%s' % dc_name)[0]
        self.assertEqual(gb_prov_status, 'Running')

        # Create Secret, SC and PVC
        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # Get list of Gluster nodes
        g_hosts = list(g.config.get("gluster_servers", {}).keys())
        self.assertGreater(
            len(g_hosts), 0,
            "We expect, at least, one Gluster Node/POD:\n %s" % g_hosts)

        # Perform checks on Gluster nodes/PODs
        logs = ("gluster-block-configshell", "gluster-blockd")

        gluster_pods = oc_get_pods(
            self.ocp_client[0], selector="glusterfs-node=pod")
        if gluster_pods:
            cmd = "tail -n 5 /var/log/glusterfs/gluster-block/%s.log"
        else:
            cmd = "tail -n 5 /var/log/gluster-block/%s.log"
        for g_host in g_hosts:
            for log in logs:
                out = cmd_run_on_gluster_pod_or_node(
                    self.ocp_client[0], cmd % log, gluster_node=g_host)
                self.assertTrue(out, "Command '%s' output is empty." % cmd)
def enable_pvc_resize(master_node):
    '''
     This function edits the /etc/origin/master/master-config.yaml
     file to enable the pv_resize feature and restarts the
     atomic-openshift services on the master node.
     Args:
         master_node (str): hostname of the master node on which the
                            master-config.yaml file should be edited
     Returns:
         bool: True if successful,
               otherwise raises an exception
    '''
    version = get_openshift_version()
    if version < "3.9":
        msg = ("pv resize is not available in openshift "
               "version %s " % version)
        g.log.error(msg)
        raise NotSupportedException(msg)

    try:
        conn = g.rpyc_get_connection(master_node, user="******")
        if conn is None:
            err_msg = ("Failed to get rpyc connection of node %s"
                       % master_node)
            g.log.error(err_msg)
            raise ExecutionError(err_msg)

        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
            data = yaml.load(f)
            dict_add = data['admissionConfig']['pluginConfig']
            if "PersistentVolumeClaimResize" in dict_add:
                g.log.info("master-config.yaml file is already edited")
                return True
            dict_add['PersistentVolumeClaimResize'] = {
                'configuration': {
                    'apiVersion': 'v1',
                    'disable': 'false',
                    'kind': 'DefaultAdmissionConfig'}}
            data['admissionConfig']['pluginConfig'] = dict_add
            kube_config = data['kubernetesMasterConfig']
            for key in ('apiServerArguments', 'controllerArguments'):
                kube_config[key] = (
                    kube_config.get(key)
                    if isinstance(kube_config.get(key), dict) else {})
                value = ['ExpandPersistentVolumes=true']
                kube_config[key]['feature-gates'] = value
        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
            yaml.dump(data, f, default_flow_style=False)
    except Exception as err:
        raise ExecutionError("failed to edit master-config.yaml file "
                             "%s on %s" % (err, master_node))
    finally:
        g.rpyc_close_connection(master_node, user="******")

    g.log.info("successfully edited master-config.yaml file "
               "%s" % master_node)
    if version == "3.9":
        cmd = ("systemctl restart atomic-openshift-master-api "
               "atomic-openshift-master-controllers")
    else:
        cmd = ("/usr/local/bin/master-restart api && "
               "/usr/local/bin/master-restart controllers")
    ret, out, err = g.run(master_node, cmd, "root")
    if ret != 0:
        err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
            cmd, master_node, out, err)
        g.log.error(err_msg)
        raise ExecutionError(err_msg)

    # Wait for API service to be ready after the restart
    for w in waiter.Waiter(timeout=120, interval=1):
        try:
            cmd_run("oc get nodes", master_node)
            return True
        except AssertionError:
            continue
    err_msg = "Exceeded 120s timeout waiting for OCP API to start responding."
    g.log.error(err_msg)
    raise ExecutionError(err_msg)
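# Illustrative sketch (not in the original source): callers would typically
# treat NotSupportedException as a reason to skip resize tests on older OCP
# versions; the master hostname below is a placeholder.
def _example_try_enable_pvc_resize(master_node="master.example.com"):
    try:
        return enable_pvc_resize(master_node)
    except NotSupportedException as err:
        g.log.info("pv resize could not be enabled: %s" % err)
        return False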
Example #40
    def cmd_run(self, cmd, hostname=None, raise_on_error=True):
        if not hostname:
            hostname = self.ocp_master_node[0]
        return command.cmd_run(
            cmd=cmd, hostname=hostname, raise_on_error=raise_on_error)
Example #41
    def is_containerized_gluster(self):
        cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
               "-o=custom-columns=:.spec.nodeName")
        g_nodes = command.cmd_run(cmd, self.ocp_client[0])
        g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
        return bool(g_nodes)