    def _verify_pool_list(self, node):
        """Verify that the given node is present in the gluster pool list"""
        pool_list = nodes_from_pool_list(self.mnode)
        status = next((n for n in pool_list if n in node.values()), None)
        self.assertIsNotNone(status, ("Node %s is not in the pool list:"
                                      " %s" % (node[self.by_type], pool_list)))
        g.log.info("The given node is present in the gluster pool list")
Example #2
    def tearDown(self):
        # Add the removed services back to the firewall
        for service in ('glusterfs', 'rpc-bind'):
            for option in ("", " --permanent"):
                cmd = ("firewall-cmd --zone=public --add-service={}{}".format(
                    service, option))
                ret, _, _ = g.run(self.node_to_probe, cmd)
                if ret:
                    raise ExecutionError("Failed to add firewall service %s "
                                         "on %s" %
                                         (service, self.node_to_probe))

        # Detach servers from cluster
        pool = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(pool, "Failed to get pool list")
        for node in pool:
            ret, _, _ = peer_detach(self.mnode, node)
            if ret:
                raise ExecutionError("Failed to detach %s from %s" %
                                     (node, self.mnode))
        # Create a cluster
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        self.get_super_method(self, 'tearDown')()
Example #3
    def tearDown(self):

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        # Clean up all volumes

        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to Cleanup the "
                                     "Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Example #4
    def tearDown(self):
        """peer teardown
        """
        # Detach all the servers if they are already attached to the cluster
        nodes_in_pool_list = nodes_from_pool_list(self.mnode)
        if nodes_in_pool_list is None:
            g.log.error(
                "Unable to get nodes from gluster pool list "
                "from node %s", self.mnode)
        else:
            g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        if nodes_in_pool_list:
            if self.mnode in nodes_in_pool_list:
                nodes_in_pool_list.remove(self.mnode)
            g.log.info("Detaching servers %s from node %s", nodes_in_pool_list,
                       self.mnode)
            for server in nodes_in_pool_list:
                ret, out, err = peer_detach(self.mnode, server)
                self.assertFalse(
                    (ret != 0 or
                     re.search(r'^peer\sdetach\:\ssuccess(.*)', out) is None),
                    ("Failed to detach server %s from node %s: %s", server,
                     self.mnode, err))
                g.log.info("Successfully detached server %s from node %s: %s",
                           server, self.mnode, out)
            g.log.info("Successfully detached servers %s from node %s",
                       nodes_in_pool_list, self.mnode)

        GlusterBaseClass.tearDown.im_func(self)
Example #5
    def test_nodes_from_pool_list(self):
        """Testing nodes from pool list and peer probe by hostname or IP
        """
        # Get list of nodes from 'gluster pool list'
        nodes_in_pool_list = nodes_from_pool_list(self.mnode)
        if nodes_in_pool_list is None:
            g.log.error(
                "Unable to get nodes from gluster pool list "
                "from node %s", self.mnode)
        else:
            g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        # Peer probe by hostname if node in nodes_in_pool_list is IP or
        # Peer probe by IP if node in nodes_in_pool_list is hostname
        for node in nodes_in_pool_list:
            if socket.gethostbyname(node) == node:
                node = socket.gethostbyaddr(node)[0]
            else:
                node = socket.gethostbyname(node)
            if node:
                g.log.info("Peer probe node %s from %s", node, self.mnode)
                ret, out, err = peer_probe(self.mnode, node)
                self.assertFalse(
                    (ret != 0 or
                     re.search(r'^peer\sprobe\:\ssuccess(.*)', out) is None),
                    ("Failed to peer probe %s from node %s", node, self.mnode))
                g.log.info("Successfully peer probed %s from node %s", node,
                           self.mnode)
Example #6
    def tearDown(self):

        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)
        g.log.info("Successful in umounting the volume and Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown.im_func(self)
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                # check all bricks are online
                ret = wait_for_bricks_to_be_online(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to bring bricks online"
                                         "for volume %s" % volume)
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def test_gluster_detect_drop_of_out_traffic_as_network_failure(self):
        """
        Test Case:
        1) Create a volume and start it.
        2) Add an iptable rule to drop outbound glusterd traffic
        3) Check if the rule is added in iptables list
        4) Execute few Gluster CLI commands like volume status, peer status
        5) Gluster CLI commands should fail with suitable error message
        """
        # Set iptablerule_set as false initially
        self.iptablerule_set = False

        # Set iptable rule on one node to drop outbound glusterd traffic
        cmd = "iptables -I OUTPUT -p tcp --dport 24007 -j DROP"
        ret, _, _ = g.run(self.servers[1], cmd)
        self.assertEqual(ret, 0, "Failed to set iptable rule on the node: %s"
                         % self.servers[1])
        g.log.info("Successfully added the rule to iptable")

        # Update iptablerule_set to true
        self.iptablerule_set = True

        # Confirm if the iptable rule was added successfully
        iptable_rule = "'OUTPUT -p tcp -m tcp --dport 24007 -j DROP'"
        cmd = "iptables -S OUTPUT | grep %s" % iptable_rule
        ret, _, _ = g.run(self.servers[1], cmd)
        self.assertEqual(ret, 0, "Failed to get the rule from iptable")

        # Fetch number of nodes in the pool, except localhost
        pool_list = nodes_from_pool_list(self.mnode)
        peers_count = len(pool_list) - 1

        # Gluster CLI commands should fail
        # Check volume status command
        ret, _, err = volume_status(self.servers[1])
        self.assertEqual(ret, 2, "Unexpected: gluster volume status command"
                         " did not return any error")

        status_err_count = err.count("Staging failed on")
        self.assertEqual(status_err_count, peers_count, "Unexpected: No. of"
                         " nodes on which vol status cmd failed is not equal"
                         " to peers_count value")
        g.log.info("Volume status command failed with expected error message")

        # Check peer status command and all peers are in 'Disconnected' state
        peer_list = get_peer_status(self.servers[1])

        for peer in peer_list:
            self.assertEqual(int(peer["connected"]), 0, "Unexpected: All"
                             " the peers are not in 'Disconnected' state")
            self.assertEqual(peer["stateStr"], "Peer in Cluster", "Unexpected:"
                             " All the peers are not in 'Peer in Cluster'"
                             " state")

        g.log.info("Peer status command listed all the peers in the"
                   " expected state")
Example #9
def are_all_self_heal_daemons_are_online(mnode, volname):
    """Verifies whether all the self-heal-daemons are online for the specified
        volume.

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool : True if all the self-heal-daemons are online for the volume.
            False otherwise.
        NoneType: None if unable to get the volume status
    """
    from glustolibs.gluster.volume_libs import is_distribute_volume
    if is_distribute_volume(mnode, volname):
        g.log.info(
            "Volume %s is a distribute volume. "
            "Hence not checking for self-heal daemons "
            "to be online", volname)
        return True

    service = 'shd'
    failure_msg = ("Verifying all self-heal-daemons are online failed for "
                   "volume %s" % volname)
    # Get volume status
    vol_status = get_volume_status(mnode=mnode,
                                   volname=volname,
                                   service=service)
    if vol_status is None:
        g.log.error(failure_msg)
        return None

    # Get all nodes from pool list
    from glustolibs.gluster.peer_ops import nodes_from_pool_list
    all_nodes = nodes_from_pool_list(mnode)
    if not all_nodes:
        g.log.error(failure_msg)
        return False

    online_status = True
    for node in all_nodes:
        node_shd_status_value = (
            vol_status[volname][node]['Self-heal Daemon']['status'])
        if node_shd_status_value != '1':
            online_status = False
    g.run(mnode, ("gluster volume status %s shd" % volname))
    if online_status is True:
        g.log.info("All self-heal Daemons are online")
        return True
    else:
        g.log.error("Some of the self-heal Daemons are offline")
        return False
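A short usage sketch for the helper above, as it might be called from a test class; the method name _verify_shd_online and the self.mnode attribute are assumptions for illustration.

    def _verify_shd_online(self, volname):
        # Fail the test if any self-heal daemon is down, or if the volume
        # status could not be fetched (the helper returns None in that case).
        ret = are_all_self_heal_daemons_are_online(self.mnode, volname)
        self.assertTrue(ret, "Self-heal daemons are not all online for %s"
                        % volname)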
Example #10
    def setUpClass(cls):
        """
        """
        # Read all the cluster config from the g.config and assign it to
        # class variables
        GlusterBaseClass.setUpClass.im_func(cls)

        # Detach all the servers if they are already attached to the cluster
        nodes_in_pool_list = nodes_from_pool_list(cls.mnode)
        if nodes_in_pool_list is None:
            g.log.error(
                "Unable to get nodes from gluster pool list "
                "from node %s", cls.mnode)
        else:
            g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        if nodes_in_pool_list:
            if cls.mnode in nodes_in_pool_list:
                nodes_in_pool_list.remove(cls.mnode)
            g.log.info("Detaching servers '%s' from the cluster from node %s",
                       nodes_in_pool_list, cls.mnode)
            ret = peer_detach_servers(cls.mnode, nodes_in_pool_list)
            if not ret:
                raise ExecutionError(
                    "Failed to detach some or all "
                    "servers %s from the cluster "
                    "from node %s", nodes_in_pool_list, cls.mnode)
            g.log.info(
                "Successfully detached all servers '%s' "
                "from the cluster from node %s", nodes_in_pool_list, cls.mnode)

        # Get pool list from mnode
        g.log.info("Pool list on node %s", cls.mnode)
        ret, out, err = pool_list(cls.mnode)
        if ret != 0:
            raise ExecutionError("Failed to get pool list on node %s: %s",
                                 cls.mnode, err)
        g.log.info("Successfully got pool list on node %s:\n%s", cls.mnode,
                   out)

        # Get peer status output from all servers
        for server in cls.servers:
            g.log.info("Peer status on node %s", server)
            ret, out, err = peer_status(server)
            if ret != 0:
                raise ExecutionError(
                    "Failed to get peer status on node %s: "
                    "%s", server, err)
            g.log.info("Successfully got peer status on node %s:\n%s", server,
                       out)
    def tearDown(self):
        """Detach servers from cluster"""
        pool = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(pool, "Failed to get pool list")
        for node in pool:
            ret, _, _ = peer_detach(self.mnode, node)
            if ret:
                raise ExecutionError("Failed to detach %s from %s" %
                                     (node, self.mnode))
        # Create a cluster
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        self.get_super_method(self, 'tearDown')()
    def tearDown(self):

        # stopping and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)

        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        self.get_super_method(self, 'tearDown')()
Example #13
    def tearDown(self):

        # stopping and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)

        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        GlusterBaseClass.tearDown.im_func(self)
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        bricks = get_servers_bricks_dict(self.servers, self.all_servers_info)

        # Check the brick directories and clean them
        for server in self.servers:
            for brick in bricks[server]:
                if get_dir_contents(server, brick):
                    cmd = "rm -rf " + brick + "/*"
                    ret, _, _ = g.run(server, cmd)
                    if ret:
                        raise ExecutionError("Failed to delete the brick "
                                             "dirs of deleted volume.")

        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        # Reset firewall services to the zone
        if not self.firewall_added:
            ret = self._add_firewall_services(self.servers[:2])
            if not ret:
                raise ExecutionError("Failed to add firewall services")

        # Reload firewall services
        ret = self._reload_firewall_service(self.servers[:2])
        if not ret:
            raise ExecutionError("Failed to reload firewall services")

        # Cleanup the volumes and unmount it, if mounted
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = is_mounted(volume,
                                 mpoint="/mnt/distribute-vol",
                                 mserver=self.mnode,
                                 mclient=self.servers[1],
                                 mtype="glusterfs")
                if ret:
                    ret, _, _ = umount_volume(mclient=self.servers[1],
                                              mpoint="/mnt/distribute-vol")
                    if ret:
                        raise ExecutionError("Failed to unmount volume")

                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume cleaned up successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        """ Cleanup the volumes """
        if self.glusterd_is_stopped:
            ret = restart_glusterd(self.servers[1])
            if not ret:
                raise ExecutionError("Failed to start glusterd on node: %s"
                                     % self.servers[1])

            ret = wait_for_glusterd_to_start(self.servers[1])
            if not ret:
                raise ExecutionError("Glusterd is not yet started on node: %s"
                                     % self.servers[1])

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume %s" % volume)

        # Disable multiplex
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'disable'})
        if not ret:
            raise ExecutionError("Failed to disable brick mux in cluster")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)

        # Calling baseclass tearDown method
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        """
        tearDown for every test
        """
        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)

        # Checking peers are in connected state or not
        ret = self.validate_peers_are_connected()
        if not ret:
            # Peer probe detached servers
            pool = nodes_from_pool_list(self.mnode)
            for node in pool:
                peer_detach(self.mnode, node)
            ret = peer_probe_servers(self.mnode, self.servers)
            if not ret:
                raise ExecutionError("Failed to probe detached servers %s" %
                                     self.servers)
        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        """
        clean up all volumes and peer probe to form cluster
        """
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown.im_func(self)
Example #19
    def tearDown(self):
        """
        Cleanup and umount volume
        """
        # Cleanup volume
        g.log.info("Starting to Cleanup Volume")
        ret = cleanup_volume(self.mnode, self.volname)
        if not ret:
            raise ExecutionError("Failed to cleanup Volume")
        g.log.info("Successful in Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in self.extra_servers:
            if node not in pool:
                ret, _, _ = peer_probe(self.mnode, node)
                if ret:
                    raise ExecutionError("Failed to probe detached server %s" %
                                         node)
        g.log.info("Peer probe success for detached servers %s",
                   self.extra_servers)

        # Calling GlusterBaseClass teardown
        GlusterBaseClass.tearDown.im_func(self)
    def tearDown(self):

        # Clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Detach servers from the cluster
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        # form a cluster
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def test_sync_functinality(self):

        # create a 2x3 volume
        num_of_servers = len(self.servers)
        servers_info_from_cluster = {}
        for server in self.servers[0:num_of_servers - 1]:
            servers_info_from_cluster[server] = self.all_servers_info[server]

        self.volume['servers'] = self.servers[0:num_of_servers - 1]
        self.volume['voltype']['replica_count'] = 3
        self.volume['voltype']['dist_count'] = 2
        ret = setup_volume(self.mnode, servers_info_from_cluster, self.volume)
        self.assertTrue(ret, ("Failed to create "
                              "and start volume %s" % self.volname))
        g.log.info("Successfully created and started the volume %s",
                   self.volname)

        # stop glusterd on a random node of the cluster
        random_server_index = random.randint(1, num_of_servers - 2)
        random_server = self.servers[random_server_index]
        cmd = "systemctl stop glusterd"
        ret = g.run_async(random_server, cmd)
        g.log.info("Stopping glusterd on %s", random_server)

        # set a option on volume, stat-prefetch on
        self.options = {"stat-prefetch": "on"}
        ret = set_volume_options(self.mnode, self.volname, self.options)
        self.assertTrue(ret, ("Failed to set option stat-prefetch to on"
                              "for the volume %s" % self.volname))
        g.log.info(
            "Succeeded in setting stat-prefetch option to on"
            "for the volume %s", self.volname)

        # start glusterd on the node where glusterd is stopped
        ret = start_glusterd(random_server)
        self.assertTrue(ret, "Failed to start glusterd on %s" % random_server)

        ret = wait_for_glusterd_to_start(random_server)
        self.assertTrue(ret, "glusterd is not running on %s" % random_server)
        g.log.info("glusterd is started and running on %s", random_server)

        # volume info should be synced across the cluster
        out1 = get_volume_info(self.mnode, self.volname)
        self.assertIsNotNone(
            out1, "Failed to get the volume info from %s" % self.mnode)
        g.log.info("Getting volume info from %s is success", self.mnode)

        count = 0
        while count < 60:
            out2 = get_volume_info(random_server, self.volname)
            self.assertIsNotNone(
                out2, "Failed to get the volume info from %s" % random_server)
            if out1 == out2:
                break
            sleep(2)
            count += 1

        g.log.info("Getting volume info from %s is success", random_server)
        self.assertDictEqual(out1, out2, "volume info is not synced")

        # stop glusterd on a random server from cluster
        random_server_index = random.randint(1, num_of_servers - 2)
        random_server = self.servers[random_server_index]
        cmd = "systemctl stop glusterd"
        ret = g.run_async(random_server, cmd)
        g.log.info("Stopping glusterd on node %s", random_server)

        # peer probe a new node
        ret = peer_probe_servers(self.mnode, self.servers[num_of_servers - 1])
        self.assertTrue(
            ret, "Failed to peer probe %s from %s" %
            (self.servers[num_of_servers - 1], self.mnode))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[num_of_servers - 1])

        # start glusterd on the node where glusterd is stopped
        ret = start_glusterd(random_server)
        self.assertTrue(ret, "Failed to start glusterd on %s" % random_server)

        ret = wait_for_glusterd_to_start(random_server)
        self.assertTrue(ret, "glusterd is not running on %s" % random_server)
        g.log.info("glusterd is started and running on %s", random_server)

        # peer status should be synced across the cluster
        list1 = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(
            list1, "Failed to get nodes list in the cluster"
            "from %s" % self.mnode)
        g.log.info("Successfully got the nodes list in the cluster from %s",
                   self.mnode)

        # replacing ip with FQDN
        i = 0
        for node in list1:
            list1[i] = socket.getfqdn(node)
            i += 1
        list1 = sorted(list1)

        count = 0
        while count < 60:
            list2 = nodes_from_pool_list(random_server)
            self.assertIsNotNone(
                list2, "Failed to get nodes list in the "
                "cluster from %s" % random_server)
            # replacing ip with FQDN
            i = 0
            for node in list2:
                list2[i] = socket.getfqdn(node)
                i += 1

            list2 = sorted(list2)
            if list2 == list1:
                break
            sleep(2)
            count += 1

        g.log.info("Successfully got the nodes list in the cluster from %s",
                   random_server)

        self.assertListEqual(list1, list2, "Peer status is "
                             "not synced across the cluster")
        g.log.info("Peer status is synced across the cluster")