def test_gluster_detect_drop_of_out_traffic_as_network_failure(self):
        """
        Test Case:
        1) Create a volume and start it.
        2) Add an iptable rule to drop outbound glusterd traffic
        3) Check if the rule is added in iptables list
        4) Execute a few Gluster CLI commands like volume status, peer status
        5) The Gluster CLI commands should fail with a suitable error message
        """
        # Set iptablerule_set as false initially
        self.iptablerule_set = False

        # Set iptable rule on one node to drop outbound glusterd traffic
        cmd = "iptables -I OUTPUT -p tcp --dport 24007 -j DROP"
        ret, _, _ = g.run(self.servers[1], cmd)
        self.assertEqual(ret, 0, "Failed to set iptable rule on the node: %s"
                         % self.servers[1])
        g.log.info("Successfully added the rule to iptable")

        # Update iptablerule_set to true
        self.iptablerule_set = True

        # Confirm if the iptable rule was added successfully
        iptable_rule = "'OUTPUT -p tcp -m tcp --dport 24007 -j DROP'"
        cmd = "iptables -S OUTPUT | grep %s" % iptable_rule
        ret, _, _ = g.run(self.servers[1], cmd)
        self.assertEqual(ret, 0, "Failed to get the rule from iptable")

        # Fetch the number of nodes in the pool, excluding the local node
        pool_list = nodes_from_pool_list(self.mnode)
        peers_count = len(pool_list) - 1

        # Gluster CLI commands should fail
        # Check volume status command
        ret, _, err = volume_status(self.servers[1])
        self.assertEqual(ret, 2, "Unexpected: gluster volume status command"
                         " did not return any error")

        status_err_count = err.count("Staging failed on")
        self.assertEqual(status_err_count, peers_count, "Unexpected: No. of"
                         " nodes on which vol status cmd failed is not equal"
                         " to peers_count value")
        g.log.info("Volume status command failed with expected error message")

        # Check peer status command and that all the peers are in
        # 'Disconnected' state
        peer_list = get_peer_status(self.servers[1])

        for peer in peer_list:
            self.assertEqual(int(peer["connected"]), 0,
                             "Unexpected: Not all the peers are in"
                             " 'Disconnected' state")
            self.assertEqual(peer["stateStr"], "Peer in Cluster",
                             "Unexpected: Not all the peers are in"
                             " 'Peer in Cluster' state")

        g.log.info("Peer status command listed all the peers in the"
                   "expected state")

    def test_validate_glusterd_info(self):
        """
        Steps:
            1. Check for the presence of /var/lib/glusterd/glusterd.info file
            2. Get the UUID of the current node
            3. Check the value of the UUID returned by executing the command
                "gluster system:: uuid get"
            4. Check the UUID value shown by the other nodes in the cluster
                for the same node ("gluster peer status" on one node will
                give the UUIDs of the other nodes)
        """
        uuid_list = []
        for server in self.servers:

            # Getting UUID from glusterd.info
            g.log.info("Getting the UUID from glusterd.info")
            cmd = "grep -i uuid /var/lib/glusterd/glusterd.info"
            ret, glusterd_volinfo, _ = g.run(server, cmd)
            self.assertFalse(
                ret, "Failed to run '{}' on '{}'".format(cmd, server))
            glusterd_volinfo = (glusterd_volinfo.split("="))[1]
            uuid_list.append(glusterd_volinfo)
            self.assertIsNotNone(glusterd_volinfo,
                                 "UUID not found in 'glusterd.info' file")

            # Getting UUID from cmd 'gluster system:: uuid get'
            ret, get_uuid, _ = g.run(
                server, "gluster system:: uuid get | awk '{print $2}'")
            self.assertFalse(ret, "Unable to get the UUID")
            self.assertIsNotNone(get_uuid, "UUID not found")

            # Checking if both the UUIDs are the same
            self.assertEqual(glusterd_volinfo, get_uuid,
                             "UUID does not match on host {}".format(server))

            # Getting the UUID from cmd "gluster peer status"
            for node in self.servers:
                for i in get_peer_status(node):
                    uuid_list.append(i["uuid"])
                if server != node:
                    self.assertTrue(
                        get_uuid.replace("\n", "") in uuid_list,
                        "uuid not matched in {}".format(node))

    def test_get_peer_status(self):
        # Get peer status
        """ Example output of peer status

        [{'uuid': '77dc299a-32f7-43d8-9977-7345a344c398',
        'hostname': 'ijk.lab.eng.xyz.com',
        'state': '3',
        'hostnames' : ['ijk.lab.eng.xyz.com'],
        'connected': '1',
        'stateStr': 'Peer in Cluster'},

        {'uuid': 'b15b8337-9f8e-4ec3-8bdb-200d6a67ae12',
        'hostname': 'def.lab.eng.xyz.com',
        'state': '3',
        'hostnames': ['def.lab.eng.xyz.com'],
        'connected': '1',
        'stateStr': 'Peer in Cluster'}
        ]
        """
        g.log.info("Get peer status --xml output as python dict from node %s",
                   self.mnode)
        peer_status_list = get_peer_status(self.mnode)
        self.assertIsNotNone(peer_status_list,
                             ("Failed to get peer status --xml "
                              "output as python dict from "
                              "node %s", self.mnode))
        g.log.info(
            "Successful in getting Peer status --xml output from "
            "node %s as python dict:\n %s", self.mnode, peer_status_list)

        # Validating UUID of the peer with get_peer_status
        server_ips = []
        for server in self.servers:
            server_ips.append(socket.gethostbyname(server))

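        # Every peer entry that maps to one of the configured servers should
        # carry a well-formed UUID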
        for peer_stat in peer_status_list:
            if socket.gethostbyname(peer_stat['hostname']) in server_ips:
                self.assertIsNotNone(
                    re.match(r'([0-9a-f]{8})(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}',
                             peer_stat['uuid'], re.I),
                    ("Invalid UUID for the node '%s'", peer_stat['hostname']))
                g.log.info("Valid UUID '%s' for the node %s",
                           peer_stat['uuid'], peer_stat['hostname'])

    def test_heketi_node_add_with_valid_cluster(self):
        """Test heketi node add operation with valid cluster id"""
        if (openshift_storage_version.get_openshift_storage_version()
                < "3.11.4"):
            self.skipTest(
                "This test case is not supported for < OCS 3.11.4 builds due "
                "to bug BZ-1732831")

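        # Short aliases for the heketi client node, the heketi server URL and
        # the OCP master node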
        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        ocp_node = self.ocp_master_node[0]

        # Get heketi endpoints before adding node
        h_volume_ids = heketi_ops.heketi_volume_list(
            h_client, h_server, json=True)
        h_endpoints_before_new_node = heketi_ops.heketi_volume_endpoint_patch(
            h_client, h_server, h_volume_ids["volumes"][0])

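        # Add a new storage node to the first cluster reported by heketi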
        cluster_info = heketi_ops.heketi_cluster_list(
            h_client, h_server, json=True)
        storage_hostname, storage_ip = self.add_heketi_node_to_cluster(
            cluster_info["clusters"][0])

        # Get heketi nodes and validate for newly added node
        h_node_ids = heketi_ops.heketi_node_list(h_client, h_server, json=True)
        for h_node_id in h_node_ids:
            node_hostname = heketi_ops.heketi_node_info(
                h_client, h_server, h_node_id, json=True)
            if node_hostname["hostnames"]["manage"][0] == storage_hostname:
                break
            node_hostname = None

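        # node_hostname stays None if the newly added node is missing from the
        # heketi node list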
        err_msg = ("Newly added heketi node %s not found in heketi node "
                   "list %s" % (storage_hostname, h_node_ids))
        self.assertTrue(node_hostname, err_msg)

        # Check gluster peer status for newly added node
        if self.is_containerized_gluster():
            gluster_pods = openshift_ops.get_ocp_gluster_pod_details(ocp_node)
            gluster_pod = [
                gluster_pod["pod_name"]
                for gluster_pod in gluster_pods
                if gluster_pod["pod_hostname"] == storage_hostname][0]

            gluster_peer_status = peer_ops.get_peer_status(
                podcmd.Pod(ocp_node, gluster_pod))
        else:
            gluster_peer_status = peer_ops.get_peer_status(
                storage_hostname)
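        # Peer status excludes the node it is run on, so with the new node
        # added the peer count should equal the original number of servers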
        self.assertEqual(
            len(gluster_peer_status), len(self.gluster_servers))

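        # Every peer listed by the new node should be in connected state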
        err_msg = "Expected peer status is 1 and actual is %s"
        for peer in gluster_peer_status:
            peer_status = int(peer["connected"])
            self.assertEqual(peer_status, 1, err_msg % peer_status)

        # Get heketi endpoints after adding node
        h_endpoints_after_new_node = heketi_ops.heketi_volume_endpoint_patch(
            h_client, h_server, h_volume_ids["volumes"][0])

        # Get openshift endpoints and patch with heketi endpoints
        heketi_db_endpoint = openshift_ops.oc_get_custom_resource(
            ocp_node, "dc", name=self.heketi_dc_name,
            custom=".:spec.template.spec.volumes[*].glusterfs.endpoints")[0]
        openshift_ops.oc_patch(
            ocp_node, "ep", heketi_db_endpoint, h_endpoints_after_new_node)
        self.addCleanup(
            openshift_ops.oc_patch, ocp_node, "ep", heketi_db_endpoint,
            h_endpoints_before_new_node)
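        # Read back the endpoint addresses and verify the new node's IP is
        # listed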
        ep_addresses = openshift_ops.oc_get_custom_resource(
            ocp_node, "ep", name=heketi_db_endpoint,
            custom=".:subsets[*].addresses[*].ip")[0].split(",")

        err_msg = "Hostname %s not present in endpoints %s" % (
            storage_ip, ep_addresses)
        self.assertIn(storage_ip, ep_addresses, err_msg)