Example #1
    def setUp(self):

        GlusterBaseClass.setUp.im_func(self)

        # check whether peers are in connected state
        ret = self.validate_peers_are_connected()
        if not ret:
            raise ExecutionError("Peers are not in connected state")

        # detach all the nodes
        ret = peer_detach_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Peer detach failed to all the servers from "
                                 "the node.")
        g.log.info("Peer detach SUCCESSFUL.")

        # Uploading file_dir script in all client directories
        g.log.info(
            "Upload io scripts to clients %s for running IO on "
            "mounts", self.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                   "file_dir_ops.py")
        ret = upload_scripts(self.clients, script_local_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 self.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   self.clients)
    def setUp(self):

        # Performing peer detach
        ret = peer_detach_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to detach servers %s" % self.servers)
        g.log.info("Peer detach SUCCESSFUL.")
        self.get_super_method(self, 'setUp')()
    def setUp(self):

        # Performing peer detach
        ret = peer_detach_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to detach servers %s" % self.servers)
        g.log.info("Peer detach SUCCESSFUL.")
        GlusterBaseClass.setUp.im_func(self)
    def setUp(self):
        self.get_super_method(self, 'setUp')()

        # Performing peer detach
        if not peer_detach_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to detach servers %s" % self.servers)
        g.log.info("Peer detach SUCCESSFUL.")
        self.peers_in_pool = []
        self.by_type = ""
        self.node = None
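
The setUp variants above all detach peers before a test runs; a matching tearDown normally probes the servers back so later tests start from a full trusted pool. A minimal sketch of such a companion method, assuming the same glustolibs helpers already used in these examples (peer_probe_servers, ExecutionError, g.log):

    def tearDown(self):
        # Probe back every server that setUp detached so the next test
        # starts with the complete trusted storage pool.
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe servers %s back into "
                                 "the pool" % self.servers)
        g.log.info("Successfully probed all servers back into the pool")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()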
Example #5
    def setUpClass(cls):
        """
        """
        # Read all the cluster config from the g.config and assign it to
        # class variables
        GlusterBaseClass.setUpClass.im_func(cls)

        # Detach all the servers if it's already attached to the cluster
        nodes_in_pool_list = nodes_from_pool_list(cls.mnode)
        if nodes_in_pool_list is None:
            g.log.error(
                "Unable to get nodes from gluster pool list "
                "from node %s", cls.mnode)
        else:
            g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        if nodes_in_pool_list:
            if cls.mnode in nodes_in_pool_list:
                nodes_in_pool_list.remove(cls.mnode)
            g.log.info("Detaching servers '%s' from the cluster from node %s",
                       nodes_in_pool_list, cls.mnode)
            ret = peer_detach_servers(cls.mnode, nodes_in_pool_list)
            if not ret:
                raise ExecutionError(
                    "Failed to detach some or all "
                    "servers %s from the cluster "
                    "from node %s" % (nodes_in_pool_list, cls.mnode))
            g.log.info(
                "Successfully detached all servers '%s' "
                "from the cluster from node %s", nodes_in_pool_list, cls.mnode)

        # Get pool list from mnode
        g.log.info("Pool list on node %s", cls.mnode)
        ret, out, err = pool_list(cls.mnode)
        if ret != 0:
            raise ExecutionError("Failed to get pool list on node %s: %s",
                                 cls.mnode, err)
        g.log.info("Successfully got pool list on node %s:\n%s", cls.mnode,
                   out)

        # Get peer status output from all servers
        for server in cls.servers:
            g.log.info("Peer status on node %s", server)
            ret, out, err = peer_status(server)
            if ret != 0:
                raise ExecutionError(
                    "Failed to get peer status on node %s: "
                    "%s" % (server, err))
            g.log.info("Successfully got peer status on node %s:\n%s", server,
                       out)
    def setUp(self):
        self.get_super_method(self, 'setUp')()

        # check whether peers are in connected state
        ret = self.validate_peers_are_connected()
        if not ret:
            raise ExecutionError("Peers are not in connected state")

        # detach all the nodes
        ret = peer_detach_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Peer detach failed to all the servers from "
                                 "the node %s." % self.mnode)
        g.log.info("Peer detach SUCCESSFUL.")
Example #7
    def setUp(self):

        GlusterBaseClass.setUp.im_func(self)

        # check whether peers are in connected state
        ret = self.validate_peers_are_connected()
        if not ret:
            raise ExecutionError("Peers are not in connected state")

        # detach all the nodes
        ret = peer_detach_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Peer detach failed to all the servers from "
                                 "the node %s." % self.mnode)
        g.log.info("Peer detach SUCCESSFUL.")
    def setUp(self):
        self.get_super_method(self, 'setUp')()

        # check whether peers are in connected state
        ret = self.validate_peers_are_connected()
        if not ret:
            raise ExecutionError("Peers are not in connected state")

        g.log.info("Peers are in connected state")

        # detach a node from cluster, assume last node
        last_node = self.servers[-1]
        ret = peer_detach_servers(self.mnode, last_node)
        if not ret:
            raise ExecutionError("Peer detach of the last node %s from %s "
                                 "failed" % (last_node, self.mnode))
        g.log.info("Peer detach SUCCESSFUL.")
    def test_logs_while_peer_detach(self):
        '''
        -> Detach a node from the peer
        -> Check for any error messages related to peer detach
           in the glusterd log file
        -> No errors should be present in the glusterd log file
        '''

        # Getting timestamp
        _, timestamp, _ = g.run_local('date +%s')
        timestamp = timestamp.strip()

        # Backing up the existing glusterd logs
        ret, _, _ = g.run(self.mnode,
                          'cp /var/log/glusterfs/glusterd.log '
                          '/var/log/glusterfs/glusterd_%s.log' % timestamp)
        if ret:
            raise ExecutionError("Failed to copy glusterd logs")

        # Clearing the existing glusterd log file
        ret, _, _ = g.run(self.mnode, 'echo > /var/log/glusterfs/glusterd.log')
        if ret:
            raise ExecutionError("Failed to clear glusterd.log file on %s"
                                 % self.mnode)

        # Performing peer detach
        self.random_server = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode, self.random_server)
        self.assertTrue(ret, "Failed to detach peer %s"
                        % self.random_server)
        g.log.info("Peer detach successful for %s", self.random_server)

        # Searching for error message in log
        ret, out, _ = g.run(
            self.mnode,
            "grep ' E ' /var/log/glusterfs/glusterd.log | wc -l")
        self.assertEqual(ret, 0, "Failed to get error message count in "
                                 "glusterd log file")
        g.log.info("Successful getting error message count in log file")

        self.assertEqual(int(out), 0, "Found Error messages in glusterd log "
                                      "file after peer detach")
        g.log.info("No error messages found in gluterd log file after peer "
                   "detach")
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        cls.get_super_method(cls, 'setUpClass')()
        ret = volume_exists(cls.mnode, cls.volname)
        if ret:
            ret = cleanup_volume(cls.mnode, cls.volname)
            if not ret:
                raise ExecutionError("Unable to delete volume")
            g.log.info("Successfully deleted volume % s", cls.volname)

        # Check if peer is connected state or not and detach all the nodes
        for server in cls.servers:
            ret = is_peer_connected(server, cls.servers)
            if ret:
                ret = peer_detach_servers(server, cls.servers)
                if not ret:
                    raise ExecutionError(
                        "Detach failed from all the servers from the node.")
                g.log.info("Peer detach SUCCESSFUL.")

        # Before starting the testcase, proceed only if it has a minimum
        # of 4 nodes
        if len(cls.servers) < 4:
            raise ExecutionError("Minimum of four nodes required for this "
                                 "testcase to execute")
Example #11
    def test_volume_create(self):

        # create and start a volume
        self.volume['name'] = "first_volume"
        self.volname = "first_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # bring a brick down and volume start force should bring it to online

        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        ret = bring_bricks_offline(self.volname, bricks_list[0:2])
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Successfully brought the bricks down")

        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Failed to start the volume")
        g.log.info("Volume start with force is success")

        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to bring the bricks online")
        g.log.info("Volume start with force successfully brought all the "
                   "bricks online")

        # create volume with previously used bricks and different volume name
        self.volname = "second_volume"
        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to create a "
            "volume with previously used bricks. Actual:"
            "Successfully created the volume with previously"
            " used bricks")
        g.log.info("Failed to create the volume with previously used bricks")

        # create a volume with already existing volume name
        self.volume['name'] = "first_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(
            ret, "Expected: It should fail to create a volume"
            " with already existing volume name. Actual: "
            "Successfully created the volume with "
            "already existing volname")
        g.log.info("Failed to create the volume with already existing volname")

        # creating a volume with non existing brick path should fail

        self.volname = "second_volume"
        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       len(self.servers), self.servers,
                                       self.all_servers_info)
        nonexisting_brick_index = random.randint(0, len(bricks_list) - 1)
        non_existing_brick = bricks_list[nonexisting_brick_index].split(":")[0]
        non_existing_path = ":/brick/non_existing_path"
        non_existing_brick = non_existing_brick + non_existing_path
        bricks_list[nonexisting_brick_index] = non_existing_brick

        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Expected: Creating a volume with non "
            "existing brick path should fail. Actual: "
            "Successfully created the volume with "
            "non existing brick path")
        g.log.info("Failed to create the volume with non existing brick path")

        # cleanup the volume and peer detach all servers. form two clusters,try
        # to create a volume with bricks whose nodes are in different clusters

        # cleanup volumes
        vol_list = get_volume_list(self.mnode)
        self.assertIsNotNone(vol_list, "Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            self.assertTrue(ret, "Unable to delete volume % s" % volume)

        # peer detach all servers
        ret = peer_detach_servers(self.mnode, self.servers)
        self.assertTrue(ret, "Peer detach to all servers is failed")
        g.log.info("Peer detach to all the servers is success")

        # form cluster 1
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(
            ret, 0, "Peer probe from %s to %s failed" %
            (self.servers[0], self.servers[1]))
        g.log.info("Peer probe from %s to %s succeeded",
                   self.servers[0], self.servers[1])

        # form cluster 2
        ret, _, _ = peer_probe(self.servers[2], self.servers[3])
        self.assertEqual(
            ret, 0, "Peer probe from %s to %s failed" %
            (self.servers[2], self.servers[3]))
        g.log.info("Peer probe from %s to %s succeeded",
                   self.servers[2], self.servers[3])

        # Creating a volume with bricks which are part of another
        # cluster should fail
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertFalse(
            ret, "Expected: Creating a volume with bricks"
            " which are part of another cluster should fail."
            " Actual: Successfully created the volume with "
            "bricks which are part of another cluster")
        g.log.info("Failed to create the volume with bricks which are "
                   "part of another cluster")

        # form a cluster, bring a node down. try to create a volume when one of
        # the brick node is down
        ret, _, _ = peer_detach(self.servers[2], self.servers[3])
        self.assertEqual(ret, 0, "Peer detach is failed")
        g.log.info("Peer detach is success")

        ret = peer_probe_servers(self.mnode, self.servers)
        self.assertTrue(ret, "Peer probe is failed")
        g.log.info("Peer probe to all the servers is success")

        random_server = self.servers[random.randint(1, len(self.servers) - 1)]
        ret = stop_glusterd(random_server)
        self.assertTrue(ret, "Glusterd is stopped successfully")

        self.volume['name'] = "third_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertFalse(
            ret, "Expected: It should fail to create a volume "
            "when one of the node is down. Actual: Successfully "
            "created the volume with bbrick whose node is down")

        g.log.info("Failed to create the volume with brick whose node is down")
Example #12
    def test_glustershd_on_newly_probed_server(self):
        """
        Test script to verify glustershd process on newly probed server

        * check glustershd process - only 1 glustershd process should
          be running
        * Add new node to cluster
        * check glustershd process - only 1 glustershd process should
          be running on all servers including the newly probed server
        * stop the volume
        * add another node to cluster
        * check glustershd process - glustershd process shouldn't be running
          on servers including newly probed server
        * start the volume
        * check glustershd process - only 1 glustershd process should
          be running on all servers including the newly probed server

        """
        # pylint: disable=too-many-statements

        nodes = self.volume['servers'][:-2]

        # check the self-heal daemon process
        g.log.info("Starting to get self heal daemon process on "
                   "nodes %s", nodes)
        ret, pids = get_self_heal_daemon_pid(nodes)
        self.assertTrue(ret, ("Either no self heal daemon process found or "
                              "more than one self heal daemon process "
                              "found : %s" % pids))
        g.log.info(
            "Successful in getting single self heal daemon process"
            " on all nodes %s", nodes)

        # Add new node to the cluster
        g.log.info("Peer probe for %s", self.extra_servers[0])
        ret = peer_probe_servers(self.mnode, self.extra_servers[0])
        self.assertTrue(
            ret, "Failed to peer probe server : %s" % self.extra_servers[0])
        g.log.info(
            "Peer probe success for %s and all peers are in "
            "connected state", self.extra_servers[0])
        nodes.append(self.extra_servers[0])

        # check the self-heal daemon process and it should be running on
        # newly probed servers
        g.log.info("Starting to get self-heal daemon process on "
                   "nodes %s", nodes)
        ret, pids = get_self_heal_daemon_pid(nodes)
        self.assertTrue(ret, ("Either no self heal daemon process found or "
                              "more than one self heal daemon process "
                              "found : %s" % pids))
        g.log.info(
            "Successful in getting single self heal daemon process"
            " on all nodes %s", nodes)

        # stop the volume
        g.log.info("Stopping the volume %s", self.volname)
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop volume %s" % self.volname)
        g.log.info("Successfully stopped volume %s", self.volname)

        # Add another new node to the cluster
        g.log.info("peer probe for %s", self.extra_servers[1])
        ret = peer_probe_servers(self.mnode, self.extra_servers[1])
        self.assertTrue(
            ret, "Failed to peer probe server : %s" % self.extra_servers[1])
        g.log.info(
            "Peer probe success for %s and all peers are in "
            "connected state", self.extra_servers[1])
        nodes.append(self.extra_servers[1])

        # check the self-heal daemon process after stopping volume and
        # no self heal daemon should be running including newly probed node
        g.log.info("Starting to get self-heal daemon process on "
                   "nodes %s", nodes)
        ret, pids = get_self_heal_daemon_pid(nodes)
        self.assertFalse(ret, ("Self Heal Daemon process is running even "
                               "after stopping volume %s" % self.volname))
        for node in pids:
            self.assertEqual(pids[node][0], -1, ("Self Heal Daemon is still "
                                                 "running on node %s even "
                                                 "after stopping all "
                                                 "volumes" % node))
        g.log.info("Expected : No self heal daemon process is running "
                   "after stopping all volumes")

        # start the volume
        g.log.info("Starting volume %s", self.volname)
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)
        g.log.info("Volume %s started successfully", self.volname)

        # Verify all volume processes are online within 60 sec
        g.log.info("Verifying that all volume processes are online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
                                                   60)
        self.assertTrue(ret, ("Volume %s : All process are not "
                              "online", self.volname))
        g.log.info("Successfully Verified volume %s processes are online",
                   self.volname)

        # Verify glustershd process releases its parent process
        g.log.info("verifying self heal daemon process is daemonized")
        ret = is_shd_daemonized(nodes)
        self.assertTrue(ret, ("Either no self heal daemon process found or "
                              "more than one self heal daemon process "
                              "found : %s" % pids))

        # check the self-heal daemon process
        g.log.info("Starting to get self-heal daemon process on "
                   "nodes %s", nodes)
        ret, pids = get_self_heal_daemon_pid(nodes)
        self.assertTrue(ret, ("Either no self heal daemon process found or "
                              "more than one self heal daemon process "
                              "found : %s" % pids))
        g.log.info(
            "Successful in getting single self heal daemon process"
            " on all nodes %s", nodes)

        # detach extra servers from the cluster
        g.log.info("peer detaching extra servers %s from cluster",
                   self.extra_servers)
        ret = peer_detach_servers(self.mnode, self.extra_servers)
        self.assertTrue(
            ret,
            "Failed to peer detach extra servers : %s" % self.extra_servers)
        g.log.info("Peer detach success for %s ", self.extra_servers)
    def test_snap_info_from_detached_node(self):
        # pylint: disable=too-many-statements
        """
        Create a volume with single brick
        Create a snapshot
        Activate the snapshot created
        Enable uss on the volume
        Validate snap info on all the nodes
        Peer detach one node
        Validate /var/lib/glusterd/snaps on the detached node
        Probe the detached node
        """

        # Creating volume with single brick on one node
        servers_info_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        bricks_list = form_bricks_list(self.mnode, self.volname, 1,
                                       self.servers[0],
                                       servers_info_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Creating a snapshot of the volume without starting it should fail
        self.snapname = "snap1"
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertNotEqual(ret, 0,
                            "Snapshot created without starting the volume")
        g.log.info("Snapshot creation failed as expected")

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to start the volume %s" % self.volname)
        g.log.info("Volume start succeeded")

        # Create a snapshot of the volume after volume start
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertEqual(
            ret, 0, "Snapshot creation failed on the volume %s" % self.volname)
        g.log.info("Snapshot create succeeded")

        # Activate snapshot created
        ret, _, err = snap_activate(self.mnode, self.snapname)
        self.assertEqual(
            ret, 0, "Snapshot activate failed with following error %s" % (err))
        g.log.info("Snapshot activated successfully")

        # Enable uss
        self.vol_options['features.uss'] = 'enable'
        ret = set_volume_options(self.mnode, self.volname, self.vol_options)
        self.assertTrue(
            ret, "gluster volume set %s features.uss "
            "enable failed" % self.volname)
        g.log.info("gluster volume set %s features.uss "
                   "enable successfully", self.volname)

        # Validate /var/lib/glusterd/snaps/<snapname> exists on all servers
        self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isdir(self.pathname)
            self.assertTrue(
                ret, "%s directory doesn't exist on node %s" %
                (self.pathname, server))
            g.log.info("%s path exists on node %s", self.pathname, server)
        g.rpyc_close_deployed_servers()

        # Peer detach one node
        self.random_node_peer_detach = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode,
                                  self.random_node_peer_detach,
                                  validate=True)
        self.assertTrue(
            ret,
            "Peer detach of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer detach succeeded")

        # /var/lib/glusterd/snaps/<snapname> directory should not be present
        conn = g.rpyc_get_connection(self.random_node_peer_detach)
        ret = conn.modules.os.path.isdir(self.pathname)
        self.assertFalse(
            ret, "%s directory should not exist on the peer"
            "which is detached from cluster%s" %
            (self.pathname, self.random_node_peer_detach))
        g.log.info("Expected: %s path doesn't exist on peer detached node %s",
                   self.pathname, self.random_node_peer_detach)
        g.rpyc_close_deployed_servers()

        # Peer probe the detached node
        ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach)
        self.assertEqual(
            ret, 0,
            "Peer probe of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer probe succeeded")

        # Validating peers are in connected state
        count = 0
        while count < 10:
            sleep(2)
            ret = self.validate_peers_are_connected()
            if ret:
                break
            count += 1
        self.assertTrue(ret, "Peers are not in connected state")
        g.log.info("Peer are in connected state")
Example #14
    def test_glusterd_statedump_when_quorum_set_on_volumes(self):
        """
        Test Case:
        1. Create and start a volume
        2. Enable quota on the volume
        3. Fuse mount the volume
        4. Get the glusterd statedump and analyze the statedump
        5. Enable client-side quorum on the volume
        6. Get the glusterd statedump
        7. Delete the volume and peer detach 2 nodes
        8. Create a replica 2 volume and start it
        9. Kill the first brick of the volume
        10. Get the glusterd statedump
        11. Start the volume with force
        12. Enable server-side quorum on the volume
        13. Get the glusterd statedump
        14. Stop glusterd on one of the nodes
        15. Get the glusterd statedump
        16. Stop glusterd on another node
        17. Get the glusterd statedump
        """
        # pylint: disable=too-many-statements
        # Enable Quota
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to enable quota on the volume %s" % self.volname)
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Mount the volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0,
                         "Failed to mount the volume: %s" % self.volname)
        g.log.info("Successfully mounted the volume: %s", self.volname)

        # Get the statedump of glusterd process
        self.dump_count = 1
        self._get_statedump_of_glusterd(self.dump_count)

        # Analyze the statedump created
        self._analyze_statedump()

        # Enable client-side quorum on volume
        option = {"cluster.quorum-type": "auto"}
        self._set_option_for_volume(self.volname, option)

        # Get the statedump of glusterd process
        self.dump_count += 1
        self._get_statedump_of_glusterd(self.dump_count)

        # Delete the volume
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to delete the volume: %s" % self.volname)
        g.log.info("Successfully deleted the volume: %s", self.volname)

        # Peer detach two nodes
        ret = peer_detach_servers(self.mnode, self.servers[1:3])
        self.assertTrue(ret,
                        "Failed to detach the servers %s" % self.servers[1:3])
        g.log.info("Successfully detached peers %s", self.servers[1:3])

        # Create a new replica 2 volume in the updated cluster
        self.volume_config = {
            'name': 'test_glusterd_statedump_when_quorum_'
            'set_on_volumes_replica-volume',
            'servers': self.servers[3:],
            'voltype': {
                'type': 'replicated',
                'replica_count': 2
            },
        }

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info,
                           self.volume_config)
        self.assertTrue(
            ret, "Failed to create and start the volume: %s" %
            self.volume_config['name'])
        g.log.info("Volume %s created and started successfully",
                   self.volume_config['name'])

        # Get the list of bricks in volume
        all_bricks = get_all_bricks(self.mnode, self.volume_config['name'])
        self.assertIsNotNone(all_bricks, "Unable to get list of bricks")

        # Kill the first brick in volume
        ret = bring_bricks_offline(self.volume_config['name'], all_bricks[0])
        self.assertTrue(ret,
                        "Unable to bring brick %s offline" % all_bricks[0])
        g.log.info("Successfully brought the brick %s offline ", all_bricks[0])

        # Get the statedump of glusterd process
        self.dump_count += 1
        self._get_statedump_of_glusterd(self.dump_count)

        # Start the volume with force
        ret, _, _ = volume_start(self.mnode, self.volume_config['name'], True)
        self.assertEqual(
            ret, 0, "Failed to start volume %s with force" %
            self.volume_config['name'])
        g.log.info("Successfully started volume %s with force",
                   self.volume_config['name'])

        # Enable server-side quorum on volume
        option = {"cluster.server-quorum-type": "server"}
        self._set_option_for_volume(self.volume_config['name'], option)

        # Get the statedump of glusterd process
        self.dump_count += 1
        self._get_statedump_of_glusterd(self.dump_count)

        # Stop glusterd process on one of the nodes.
        self._stop_gluster(self.servers[3])

        # Get the statedump of glusterd process
        self.dump_count += 1
        self._get_statedump_of_glusterd(self.dump_count)

        # Stop glusterd process on another node.
        self._stop_gluster(self.servers[4])

        # Get the statedump of glusterd process
        self.dump_count += 1
        self._get_statedump_of_glusterd(self.dump_count)
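
The helper methods called throughout this example (_get_statedump_of_glusterd, _analyze_statedump, _set_option_for_volume, _stop_gluster) are not shown on this page. As a rough, hypothetical illustration only: glusterd writes a statedump when it receives SIGUSR1, so a stand-in for _get_statedump_of_glusterd could be sketched with g.run as below. The location and naming of the resulting dump file depend on the glusterd configuration, and this is not the test's actual helper.

    def _get_statedump_of_glusterd(self, dump_count):
        # Hypothetical sketch: request a glusterd statedump by sending
        # SIGUSR1; the daemon writes the dump under its statedump
        # directory (commonly /var/run/gluster).
        ret, _, _ = g.run(self.mnode, "kill -SIGUSR1 $(pidof glusterd)")
        self.assertEqual(ret, 0, "Failed to send SIGUSR1 to glusterd on %s"
                         % self.mnode)
        g.log.info("Requested glusterd statedump #%d on %s", dump_count,
                   self.mnode)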