Example 1
    def tearDown(self):

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        # Clean up all volumes

        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to Cleanup the "
                                     "Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                # check all bricks are online
                ret = wait_for_bricks_to_be_online(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to bring bricks online "
                                         "for volume %s" % volume)
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
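A minimal sketch of the imports these setUp/tearDown excerpts typically rely on is shown below; the module paths follow the usual glustolibs layout and are an assumption rather than part of any one example.

# Sketch only: imports assumed by the peer/volume helpers used in the excerpts.
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.peer_ops import (peer_detach, peer_probe_servers,
                                         nodes_from_pool_list)
from glustolibs.gluster.volume_ops import get_volume_list
from glustolibs.gluster.volume_libs import cleanup_volume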
Example 3
    def tearDown(self):

        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)
        g.log.info("Successful in umounting the volume and Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown.im_func(self)
Example 4
    def tearDown(self):
        # Add the removed services in firewall
        for service in ('glusterfs', 'rpc-bind'):
            for option in ("", " --permanent"):
                cmd = ("firewall-cmd --zone=public --add-service={}{}".format(
                    service, option))
                ret, _, _ = g.run(self.node_to_probe, cmd)
                if ret:
                    raise ExecutionError("Failed to add firewall service %s "
                                         "on %s" %
                                         (service, self.node_to_probe))

        # Detach servers from cluster
        pool = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(pool, "Failed to get pool list")
        for node in pool:
            if not peer_detach(self.mnode, node):
                raise ExecutionError("Failed to detach %s from %s" %
                                     (node, self.mnode))
        # Create a cluster
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        self.get_super_method(self, 'tearDown')()
Example 5
    def setUp(self):
        # Calling GlusterBaseClass setUp
        GlusterBaseClass.setUp.im_func(self)

        self.extra_servers = self.servers[-2:]
        self.servers = self.servers[:-2]
        # Performing peer detach
        for server in self.extra_servers:
            # Peer detach
            ret, _, _ = peer_detach(self.mnode, server)
            if ret:
                raise ExecutionError("Peer detach failed")
            g.log.info("Peer detach successful.")

        # Create volume using first four nodes
        servers_info_from_four_nodes = {}
        for server in self.servers:
            servers_info_from_four_nodes[server] = self.all_servers_info[
                server]

        self.volume['servers'] = self.servers
        ret = setup_volume(self.mnode,
                           servers_info_from_four_nodes,
                           self.volume,
                           force=False)
        if not ret:
            raise ExecutionError("Volume create failed on four nodes")
        g.log.info("Distributed replicated volume created successfully")

        # Verify glustershd process releases its parent process
        ret = is_shd_daemonized(self.servers)
        if not ret:
            raise ExecutionError("Self Heal Daemon process was still"
                                 " holding parent process.")
        g.log.info("Self Heal Daemon processes are online")
Example 6
    def tearDown(self):
        """peer teardown
        """
        # Detach all the servers if it's already attached to the cluster
        nodes_in_pool_list = nodes_from_pool_list(self.mnode)
        if nodes_in_pool_list is None:
            g.log.error(
                "Unable to get nodes from gluster pool list "
                "from node %s", self.mnode)
        else:
            g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        if nodes_in_pool_list:
            if self.mnode in nodes_in_pool_list:
                nodes_in_pool_list.remove(self.mnode)
            g.log.info("Detaching servers %s from node %s", nodes_in_pool_list,
                       self.mnode)
            for server in nodes_in_pool_list:
                ret, out, err = peer_detach(self.mnode, server)
                self.assertFalse(
                    (ret != 0 or
                     re.search(r'^peer\sdetach\:\ssuccess(.*)', out) is None),
                    ("Failed to detach server %s from node %s: %s", server,
                     self.mnode, err))
                g.log.info("Successfully detached server %s from node %s: %s",
                           server, self.mnode, out)
            g.log.info("Successfully detached servers %s from node %s",
                       nodes_in_pool_list, self.mnode)

        GlusterBaseClass.tearDown.im_func(self)
    def setUp(self):
        """ Detach peers, and leave it as a 3 node cluster """
        for server in self.servers[3:]:
            ret, _, _ = peer_detach(self.mnode, server)
            if ret:
                raise ExecutionError("Peer detach failed")

        self.get_super_method(self, 'setUp')()
Example 8
    def tearDown(self):

        # stopping and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)

        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        GlusterBaseClass.tearDown.im_func(self)
    def tearDown(self):

        # stopping and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)

        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        self.get_super_method(self, 'tearDown')()
Example 10
    def setUp(self):

        # Performing peer detach
        for server in self.servers[1:]:
            ret, _, _ = peer_detach(self.mnode, server)
            if ret != 0:
                raise ExecutionError("Peer detach failed")
            g.log.info("Peer detach SUCCESSFUL.")
        GlusterBaseClass.setUp.im_func(self)
    def setUp(self):

        # Performing peer detach
        for server in self.servers[1:]:
            ret, _, _ = peer_detach(self.mnode, server)
            if ret != 0:
                raise ExecutionError("Peer detach failed")
            g.log.info("Peer detach SUCCESSFUL.")
        self.get_super_method(self, 'setUp')()
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        bricks = get_servers_bricks_dict(self.servers, self.all_servers_info)

        # Checking brick dir and cleaning it.
        for server in self.servers:
            for brick in bricks[server]:
                if get_dir_contents(server, brick):
                    cmd = "rm -rf " + brick + "/*"
                    ret, _, _ = g.run(server, cmd)
                    if ret:
                        raise ExecutionError("Failed to delete the brick "
                                             "dirs of deleted volume.")

        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        # Reset firewall services to the zone
        if not self.firewall_added:
            ret = self._add_firewall_services(self.servers[:2])
            if not ret:
                raise ExecutionError("Failed to add firewall services")

        # Reload firewall services
        ret = self._reload_firewall_service(self.servers[:2])
        if not ret:
            raise ExecutionError("Failed to reload firewall services")

        # Cleanup the volumes and unmount it, if mounted
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = is_mounted(volume,
                                 mpoint="/mnt/distribute-vol",
                                 mserver=self.mnode,
                                 mclient=self.servers[1],
                                 mtype="glusterfs")
                if ret:
                    ret, _, _ = umount_volume(mclient=self.servers[1],
                                              mpoint="/mnt/distribute-vol")
                    if ret:
                        raise ExecutionError("Failed to unmount volume")

                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume cleaned up successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def setUp(self):

        # Adding services list here, so that it can be
        # used in teardown if setup fails
        self.services_lst = ['glusterfs', 'nfs', 'rpc-bind']

        # Performing peer detach
        for server in self.servers[1:]:
            ret, _, _ = peer_detach(self.mnode, server)
            if ret != 0:
                raise ExecutionError("Peer detach failed")
        self.get_super_method(self, 'setUp')()
    def tearDown(self):
        """ Cleanup the volumes """
        if self.glusterd_is_stopped:
            ret = restart_glusterd(self.servers[1])
            if not ret:
                raise ExecutionError("Failed to start glusterd on node: %s"
                                     % self.servers[1])

            ret = wait_for_glusterd_to_start(self.servers[1])
            if not ret:
                raise ExecutionError("Glusterd is not yet started on node: %s"
                                     % self.servers[1])

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume %s" % volume)

        # Disable multiplex
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'disable'})
        if not ret:
            raise ExecutionError("Failed to disable brick mux in cluster")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)

        # Calling baseclass tearDown method
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        """
        tearDown for every test
        """
        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)

        # Checking peers are in connected state or not
        ret = self.validate_peers_are_connected()
        if not ret:
            # Peer probe detached servers
            pool = nodes_from_pool_list(self.mnode)
            for node in pool:
                peer_detach(self.mnode, node)
            ret = peer_probe_servers(self.mnode, self.servers)
            if not ret:
                raise ExecutionError("Failed to probe detached servers %s" %
                                     self.servers)
        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
    def test_peer_probe_snapd_running(self):
        '''
        -> Create Volume
        -> Create snap for that volume
        -> Enable uss
        -> Check snapd running or not
        -> Probe a new node while snapd is running
        '''

        # Performing node detach; the detached node is treated as an extra
        # server
        extra_node = self.servers[-1]
        ret, _, _ = peer_detach(self.mnode, extra_node)
        self.assertEqual(ret, 0, "Peer detach failed for %s" % extra_node)
        g.log.info("Peer detach success for %s", extra_node)

        # Remove the detached node from 'self.servers', because
        # 'self.setup_volume' checks the peer status of every node in
        # 'self.servers' before creating the volume
        self.servers.remove(extra_node)

        # Creating volume
        ret = self.setup_volume()
        self.assertTrue(ret, "Failed Create volume %s" % self.volname)
        g.log.info("Volume created successfully %s", self.volname)

        # Adding node back into self.servers list
        self.servers.append(extra_node)

        # creating Snap
        ret, _, _ = snap_create(self.mnode, self.volname, 'snap1')
        self.assertEqual(ret, 0,
                         "Snap creation failed for volume %s" % self.volname)
        g.log.info("Snap created successfully for volume %s", self.volname)

        # Enabling Snapd(USS)
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to enable USS for volume %s" % self.volname)
        g.log.info("USS Enabled successfully on volume %s", self.volname)

        # Checking snapd running or not
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(ret, "Snapd not running for volume %s" % self.volname)
        g.log.info("snapd running for volume %s", self.volname)

        # Probing new node
        ret = peer_probe_servers(self.mnode, extra_node)
        self.assertTrue(ret,
                        "Peer Probe failed for new server %s" % extra_node)
        g.log.info("Peer Probe success for new server %s", extra_node)
    def tearDown(self):
        """
        clean up all volumes and peer probe to form cluster
        """
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown.im_func(self)
    def test_peer_detach_check_warning_message(self):
        # pylint: disable=too-many-statements
        """
        Test Case:
        1) Create a cluster.
        2) Peer detach a node but don't press y.
        3) Check the warning message.
        4) Check peer status.
           (Node shouldn't be detached!)
        5) Peer detach a node now press y.
        6) Check peer status.
           (Node should be detached!)
        """

        # Peer detach one node
        ret, msg, _ = g.run(self.mnode,
                            "gluster peer detach %s" % self.servers[1])
        self.assertEqual(ret, 0,
                         "ERROR: Peer detach successful %s" % self.servers[1])
        g.log.info("EXPECTED: Failed to detach %s", self.servers[1])

        # Checking warning message
        expected_msg = ' '.join([
            'All clients mounted through the peer which is getting',
            'detached need to be remounted using one of the other',
            'active peers in the trusted storage pool to ensure',
            'client gets notification on any changes done on the',
            'gluster configuration and if the same has been done',
            'do you want to proceed'
        ])
        self.assertIn(expected_msg,
                      msg.split('?')[0],
                      "Incorrect warning message for peer detach.")
        g.log.info("Correct warning message for peer detach.")

        # Checking if peer is connected
        ret = is_peer_connected(self.mnode, self.servers[1])
        self.assertTrue(ret, "Peer is not in connected state.")
        g.log.info("Peer is in connected state.")

        # Peer detach one node
        ret, _, _ = peer_detach(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, "Failed to detach %s" % self.servers[1])
        g.log.info("Peer detach successful %s", self.servers[1])

        # Checking if peer is connected
        ret = is_peer_connected(self.mnode, self.servers[1])
        self.assertFalse(ret, "Peer is in connected state.")
        g.log.info("Peer is not in connected state.")
Example 20
    def tearDown(self):

        # Clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Detach servers from cluster
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        # form a cluster
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        """Detach servers from cluster"""
        pool = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(pool, "Failed to get pool list")
        for node in pool:
            if not peer_detach(self.mnode, node):
                raise ExecutionError("Failed to detach %s from %s" %
                                     (node, self.mnode))
        # Create a cluster
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        self.get_super_method(self, 'tearDown')()
Example 22
    def check_detach_error_message(self, use_force=True):
        ret, _, err = peer_detach(self.mnode, self.servers[1], force=use_force)
        self.assertNotEqual(ret, 0,
                            "detach server should fail: %s" % self.servers[1])
        msg = ('peer detach: failed: Brick(s) with the peer ' +
               self.servers[1] + ' ' + 'exist in cluster')
        if msg not in err:
            msg = ('peer detach: failed: Peer ' + self.servers[1] +
                   ' hosts one or more bricks. ' +
                   'If the peer is in not recoverable ' +
                   'state then use either ' +
                   'replace-brick or remove-brick command ' +
                   'with force to remove ' + 'all bricks from the peer and ' +
                   'attempt the peer detach again.')
            self.assertIn(
                msg, err, "Peer detach not failed with "
                "proper error message")
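    # Usage sketch (hypothetical, not part of the original excerpt): assuming
    # a volume with bricks on self.servers[1] already exists, both a plain and
    # a forced detach are expected to fail with one of the messages checked by
    # the helper above.
    def test_detach_of_brick_hosting_peer_fails(self):
        self.check_detach_error_message(use_force=False)
        self.check_detach_error_message(use_force=True)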
    def test_peer_probe_when_glusterd_down(self):
        # pylint: disable=too-many-statements
        '''
        Test script to verify the behavior when we try to peer
        probe a valid node whose glusterd is down
        Also post validate to make sure no core files are created
        under "/", /var/log/core and /tmp  directory

        Ref: BZ#1257394 Provide meaningful error on peer probe and peer detach
        Test Steps:
        1 check the current peer status
        2 detach one of the valid nodes which is already part of cluster
        3 stop glusterd on that node
        4 try to attach above node to cluster, which must fail with
          Transport End point error
        5 Recheck the test using hostname, expected to see same result
        6 start glusterd on that node
        7 halt/reboot the node
        8 try to peer probe the halted node, which must fail again.
        9 The only error accepted is
          "peer probe: failed: Probe returned with Transport endpoint is not
          connected"
        10 Check peer status and make sure no other nodes in peer reject state
        '''

        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Detach one of the nodes which is part of the cluster
        g.log.info("detaching server %s ", self.servers[1])
        ret, _, err = peer_detach(self.mnode, self.servers[1])
        msg = 'peer detach: failed: %s is not part of cluster\n' \
              % self.servers[1]
        if ret:
            self.assertEqual(err, msg, "Failed to detach %s "
                             % (self.servers[1]))

        # Bring down glusterd of the server which has been detached
        g.log.info("Stopping glusterd on %s ", self.servers[1])
        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Fail to stop glusterd on %s " % self.servers[1])

        # Trying to peer probe the node whose glusterd was stopped using IP
        g.log.info("Peer probing %s when glusterd down ", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        # Trying to peer probe the same node with hostname
        g.log.info("Peer probing node %s using hostname with glusterd down ",
                   self.servers[1])
        hostname = g.run(self.servers[1], "hostname")
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with"
                              " Transport endpoint is not connected\n")

        # Start glusterd again for the next set of test steps
        g.log.info("starting glusterd on %s ", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "glusterd couldn't start successfully on %s"
                        % self.servers[1])

        # Bring down the network for sometime
        network_status = bring_down_network_interface(self.servers[1], 150)

        # Peer probing the node using IP when it is still not online
        g.log.info("Peer probing node %s when network is down",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe passed when it was expected to"
                                    " fail")
        self.assertEqual(err.split("\n")[0], "peer probe: failed: Probe "
                                             "returned with Transport endpoint"
                                             " is not connected")

        # Peer probing the node using hostname when it is still not online
        g.log.info("Peer probing node %s using hostname which is still "
                   "not online ",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when node "
                                    "has not come online")
        self.assertEqual(err.split("\n")[0], "peer probe: failed: Probe "
                                             "returned with Transport endpoint"
                                             " is not connected")

        ret, _, _ = network_status.async_communicate()
        if ret != 0:
            g.log.error("Failed to perform network interface ops")

        # Peer probe the node must pass
        g.log.info("peer probing node %s", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, "Peer probe has failed unexpectedly with "
                                 "%s " % err)

        # Checking if core file created in "/", "/tmp" and "/var/log/core"
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "core file found")
Example 24
    def test_volume_op(self):

        # Starting a non existing volume should fail
        ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to Start a non"
            " existing volume. Actual: Successfully started "
            "a non existing volume")
        g.log.info("Starting a non existing volume is failed")

        # Stopping a non existing volume should fail
        ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to stop "
            "non-existing volume. Actual: Successfully "
            "stopped a non existing volume")
        g.log.info("Stopping a non existing volume is failed")

        # Deleting a non existing volume should fail
        ret = volume_delete(self.mnode, "no_vol")
        self.assertTrue(
            ret, "Expected: It should fail to delete a "
            "non existing volume. Actual:Successfully deleted "
            "a non existing volume")
        g.log.info("Deleting a non existing volume is failed")

        # Detach a server and try to create volume with node
        # which is not in cluster
        ret, _, _ = peer_detach(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, ("Peer detach is failed"))
        g.log.info("Peer detach is successful")

        num_of_bricks = len(self.servers)
        bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
                                       self.servers, self.all_servers_info)

        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Successfully created volume with brick "
            "from which is not a part of node")
        g.log.info("Creating a volume with brick from node which is not part "
                   "of cluster is failed")

        # Peer probe the detached server
        ret, _, _ = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, ("Peer probe is failed"))
        g.log.info("Peer probe is successful")

        # Create and start a volume
        ret = setup_volume(self.mnode,
                           self.all_servers_info,
                           self.volume,
                           force=True)
        self.assertTrue(ret, "Failed to create the volume")
        g.log.info("Successfully created and started the volume")

        # Starting already started volume should fail
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to start a "
            "already started volume. Actual:Successfully"
            " started a already started volume ")
        g.log.info("Starting a already started volume is Failed.")

        # Deleting a volume without stopping should fail
        ret = volume_delete(self.mnode, self.volname)
        self.assertFalse(ret, ("Expected: It should fail to delete a volume"
                               " without stopping. Actual: Successfully "
                               "deleted a volume without stopping it"))
        g.log.error("Failed to delete a volume without stopping it")

        # Stopping a volume should succeed
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("volume stop is failed"))
        g.log.info("Volume stop is success")

        # Stopping a already stopped volume should fail
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to stop a "
            "already stopped volume . Actual: Successfully"
            "stopped a already stopped volume")
        g.log.info("Volume stop is failed on already stopped volume")

        # Deleting a volume should succeed
        ret = volume_delete(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume delete is failed"))
        g.log.info("Volume delete is success")

        # Deleting a non existing volume should fail
        ret = volume_delete(self.mnode, self.volname)
        self.assertTrue(
            ret, "Expected: It should fail to delete a non "
            "existing volume. Actual:Successfully deleted a "
            "non existing volume")
        g.log.info("Volume delete is failed for non existing volume")

        # Volume info command should succeed
        ret = get_volume_info(self.mnode)
        self.assertIsNotNone(ret, "volume info command failed")
        g.log.info("Volume info command is success")
Example 25
    def test_volume_create(self):

        # create and start a volume
        self.volume['name'] = "first_volume"
        self.volname = "first_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # bring a brick down and volume start force should bring it to online

        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        ret = bring_bricks_offline(self.volname, bricks_list[0:2])
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Successfully brought the bricks down")

        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Failed to start the volume")
        g.log.info("Volume start with force is success")

        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to bring the bricks online")
        g.log.info("Volume start with force successfully brought all the "
                   "bricks online")

        # create volume with previously used bricks and different volume name
        self.volname = "second_volume"
        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to create a "
            "volume with previously used bricks. Actual:"
            "Successfully created the volume with previously"
            " used bricks")
        g.log.info("Failed to create the volume with previously used bricks")

        # create a volume with already existing volume name
        self.volume['name'] = "first_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(
            ret, "Expected: It should fail to create a volume"
            " with already existing volume name. Actual: "
            "Successfully created the volume with "
            "already existing volname")
        g.log.info("Failed to create the volume with already existing volname")

        # creating a volume with non existing brick path should fail

        self.volname = "second_volume"
        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       len(self.servers), self.servers,
                                       self.all_servers_info)
        nonexisting_brick_index = random.randint(0, len(bricks_list) - 1)
        non_existing_brick = bricks_list[nonexisting_brick_index].split(":")[0]
        non_existing_path = ":/brick/non_existing_path"
        non_existing_brick = non_existing_brick + non_existing_path
        bricks_list[nonexisting_brick_index] = non_existing_brick

        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Expected: Creating a volume with non "
            "existing brick path should fail. Actual: "
            "Successfully created the volume with "
            "non existing brick path")
        g.log.info("Failed to create the volume with non existing brick path")

        # cleanup the volume and peer detach all servers. form two clusters,try
        # to create a volume with bricks whose nodes are in different clusters

        # cleanup volumes
        vol_list = get_volume_list(self.mnode)
        self.assertIsNotNone(vol_list, "Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            self.assertTrue(ret, "Unable to delete volume %s" % volume)

        # peer detach all servers
        ret = peer_detach_servers(self.mnode, self.servers)
        self.assertTrue(ret, "Peer detach to all servers is failed")
        g.log.info("Peer detach to all the servers is success")

        # form cluster 1
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(
            ret, 0, "Peer probe from %s to %s is failed" %
            (self.servers[0], self.servers[1]))
        g.log.info("Peer probe is success from %s to %s" %
                   (self.servers[0], self.servers[1]))

        # form cluster 2
        ret, _, _ = peer_probe(self.servers[2], self.servers[3])
        self.assertEqual(
            ret, 0, "Peer probe from %s to %s is failed" %
            (self.servers[2], self.servers[3]))
        g.log.info("Peer probe is success from %s to %s" %
                   (self.servers[2], self.servers[3]))

        # Creating a volume with bricks which are part of another
        # cluster should fail
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertFalse(
            ret, "Expected: Creating a volume with bricks"
            " which are part of another cluster should fail."
            " Actual: Successfully created the volume with "
            "bricks which are part of another cluster")
        g.log.info("Failed to create the volume with bricks which are "
                   "part of another cluster")

        # form a cluster, bring a node down. try to create a volume when one of
        # the brick node is down
        ret, _, _ = peer_detach(self.servers[2], self.servers[3])
        self.assertEqual(ret, 0, "Peer detach is failed")
        g.log.info("Peer detach is success")

        ret = peer_probe_servers(self.mnode, self.servers)
        self.assertTrue(ret, "Peer probe is failed")
        g.log.info("Peer probe to all the servers is success")

        random_server = self.servers[random.randint(1, len(self.servers) - 1)]
        ret = stop_glusterd(random_server)
        self.assertTrue(ret, "Failed to stop glusterd on %s" % random_server)

        self.volume['name'] = "third_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertFalse(
            ret, "Expected: It should fail to create a volume "
            "when one of the nodes is down. Actual: Successfully "
            "created the volume with brick whose node is down")

        g.log.info("Failed to create the volume with brick whose node is down")
    def test_gluster_operation_after_removing_firewall(self):
        """
        Test steps:
        1. Add firewall services to the zones on 2 nodes
        2. Create a cluster using the 2 nodes
        3. Check peer status on both the nodes
        4. Remove firewall services from both the nodes
        5. Check peer status on both the nodes
        6. Create a distribute volume using both the node bricks and start it
        7. Mount the volume on different node, it should fail
        8. Cleanup the volume, Detach the node and try to probe again
        9. Check peer status
        10. Remove firewall services permanently and reload firewall
        11. Check peer status
        12. Create a distribute volume using both the node bricks and start it
        13. Mount the volume on different node, it should fail
        """
        # pylint: disable=too-many-statements
        # Add firewall services on first 2 nodes
        ret = self._add_firewall_services(self.servers[:2])
        self.assertTrue(ret, "Failed to add services to firewall")

        self.firewall_added = True

        # Peer probe second node
        self._probe_peer(self.servers[1])

        # Check peer status on both the nodes
        ret = wait_for_peers_to_connect(self.mnode, self.servers[:2])
        self.assertTrue(ret, "Peer is not connected")

        # Remove firewall services
        self._remove_firewall_services(self.servers[:2])

        self.firewall_added = False

        # Create a volume
        self._create_distribute_volume("distribute_volume")

        # Start the volume
        self._start_the_volume(self.volname)

        # Mount the volume on a different node, it should fail
        self._try_mounting_volume()

        # Cleanup volume before peer detach
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to cleanup volume")

        # Detach the probed node
        ret, _, _ = peer_detach(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, "Failed to detach node: %s" % self.servers[1])

        # Peer probe the node should fail
        self._probe_peer(self.servers[1], True)

        # Add firewall services permanently
        ret = self._add_firewall_services(self.servers[:2])
        self.assertTrue(ret, "Failed to add services to firewall")

        self.firewall_added = True

        # Reload firewall
        ret = self._reload_firewall_service(self.servers[:2])
        self.assertTrue(ret, "Failed to reload firewall service")

        # Peer probe again
        self._probe_peer(self.servers[1])

        # Check peer status the probed node
        ret = wait_for_peers_to_connect(self.mnode, self.servers[1])
        self.assertTrue(ret, "Peer is not connected")

        # Remove firewall services permanently
        self._remove_firewall_services(self.servers[:2])

        self.firewall_added = False

        # Reload firewall
        ret = self._reload_firewall_service(self.servers[:2])
        self.assertTrue(ret, "Failed to reload firewall service")

        # Check peer status
        ret = is_peer_connected(self.mnode, self.servers[1])
        self.assertTrue(ret, "Peer is not connected")

        # Create a volume
        self._create_distribute_volume("distribute_volume_2")

        # Start the volume
        self._start_the_volume(self.volname)

        # Mount the volume on a different node, it should fail
        self._try_mounting_volume()
Example 27
    def test_peer_detach_host(self):
        # peer Detaching specified server from cluster
        # peer Detaching detached server again
        # peer Detaching invalid host
        # peer Detaching Non exist host
        # peer Checking Core file created or not
        # Peer detach one node which contains the bricks of volume created
        # Peer detach force a node which is hosting bricks of a volume

        # Timestamp of current test case of start time
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Assigning non existing host to variable
        self.non_exist_host = '256.256.256.256'

        # Assigning invalid ip to variable
        self.invalid_ip = '10.11.a'

        # Peer detach to specified server
        g.log.info("Start detach specified server :%s" % self.servers[1])
        ret, out, _ = peer_detach(self.mnode, self.servers[1])
        self.assertEqual(ret, 0,
                         "Failed to detach server :%s" % self.servers[1])

        # Detached server detaching again, Expected to fail detach
        g.log.info("Start detached server detaching "
                   "again : %s" % self.servers[1])
        ret, out, _ = peer_detach(self.mnode, self.servers[1])
        self.assertNotEqual(
            ret, 0, "Detach server should "
            "fail :%s" % self.servers[1])

        # Probing detached server
        g.log.info("Start probing detached server : %s" % self.servers[1])
        ret = peer_probe_servers(self.mnode, self.servers[1])
        self.assertTrue(
            ret, "Peer probe failed from %s to other "
            "server : %s" % (self.mnode, self.servers[1]))

        # Detach invalid host
        g.log.info("Start detaching invalid host :%s " % self.invalid_ip)
        ret, out, _ = peer_detach(self.mnode, self.invalid_ip)
        self.assertNotEqual(
            ret, 0, "Detach invalid host should "
            "fail :%s" % self.invalid_ip)

        # Detach non exist host
        g.log.info("Start detaching non exist host : %s" % self.non_exist_host)
        ret, out, _ = peer_detach(self.mnode, self.non_exist_host)
        self.assertNotEqual(
            ret, 0, "Detach non existing host "
            "should fail :%s" % self.non_exist_host)

        # Checking core file created or not in "/", "/tmp", "/var/log/core"
        # directory
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "glusterd service should not crash")
        g.log.info("No core file found, glusterd service running "
                   "successfully")

        # Creating Volume
        g.log.info("Started creating volume: %s" % self.volname)
        ret = self.setup_volume()
        self.assertTrue(ret, "Volume creation failed: %s" % self.volname)

        # Peer detach one node which contains the bricks of the volume created
        g.log.info("Start detaching server %s which is hosting "
                   "bricks of a volume" % self.servers[1])
        ret, out, err = peer_detach(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0,
                            "detach server should fail: %s" % self.servers[1])
        msg = ('peer detach: failed: Brick(s) with the peer ' +
               self.servers[1] + ' ' + 'exist in cluster')
        self.assertIn(msg, err, "Peer detach not failed with "
                      "proper error message")

        #  Peer detach force a node which is hosting bricks of a volume
        g.log.info("start detaching server %s with force option "
                   "which is hosting bricks of a volume" % self.servers[1])
        ret, out, err = peer_detach(self.mnode, self.servers[1], force=True)
        self.assertNotEqual(
            ret, 0, "detach server should fail with force "
            "option : %s" % self.servers[1])
        msg = ('peer detach: failed: Brick(s) with the peer ' +
               self.servers[1] + ' ' + 'exist in cluster')
        self.assertIn(
            msg, err, "Peer detach not failed with proper "
            "error message with force option")
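The block below is a hedged sketch of the CLI interaction that the peer_detach() wrapper above is assumed to drive; the exact command and flags used by the real library may differ.

# Sketch only (assumption): reproducing the detach failure manually through
# the gluster CLI with glusto's g.run instead of the peer_detach() wrapper.
from glusto.core import Glusto as g

def manual_peer_detach(mnode, server, force=False):
    """Run 'gluster peer detach' non-interactively; returns (ret, out, err)."""
    cmd = "gluster --mode=script peer detach %s%s" % (
        server, " force" if force else "")
    return g.run(mnode, cmd)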
Example 28
    def test_peer_probe_when_glusterd_down(self):
        # pylint: disable=too-many-statements
        '''
        Test script to verify the behavior when we try to peer
        probe a valid node whose glusterd is down
        Also post validate to make sure no core files are created
        under "/", /var/log/core and /tmp  directory

        Ref: BZ#1257394 Provide meaningful error on peer probe and peer detach
        Test Steps:
        1 check the current peer status
        2 detach one of the valid nodes which is already part of cluster
        3 stop glusterd on that node
        4 try to attach above node to cluster, which must fail with
          Transport End point error
        5 Recheck the test using hostname, expected to see same result
        6 start glusterd on that node
        7 halt/reboot the node
        8 try to peer probe the halted node, which must fail again.
        9 The only error accepted is
          "peer probe: failed: Probe returned with Transport endpoint is not
          connected"
        10 Check peer status and make sure no other nodes in peer reject state
        '''

        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # detach one of the nodes which is part of the cluster
        g.log.info("detaching server %s ", self.servers[1])
        ret, _, err = peer_detach(self.mnode, self.servers[1])
        msg = 'peer detach: failed: %s is not part of cluster\n' \
              % self.servers[1]
        if ret:
            self.assertEqual(err, msg, "Failed to detach %s "
                             % (self.servers[1]))

        # bring down glusterd of the server which has been detached
        g.log.info("Stopping glusterd on %s ", self.servers[1])
        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Fail to stop glusterd on %s " % self.servers[1])

        # trying to peer probe the node whose glusterd was stopped using its IP
        g.log.info("Peer probing %s when glusterd down ", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        # trying to peer probe the same node with hostname
        g.log.info("Peer probing node %s using hostname with glusterd down ",
                   self.servers[1])
        hostname = g.run(self.servers[1], "hostname")
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with"
                              " Transport endpoint is not connected\n")

        # start glusterd again for the next set of test steps
        g.log.info("starting glusterd on %s ", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "glusterd couldn't start successfully on %s"
                        % self.servers[1])

        # reboot a server and then trying to peer probe at the time of reboot
        g.log.info("Rebooting %s and checking peer probe", self.servers[1])
        reboot = g.run_async(self.servers[1], "reboot")

        # Mandatory sleep for 3 seconds to make sure node is in halted state
        sleep(3)

        # Peer probing the node using IP when it is still not online
        g.log.info("Peer probing node %s which has been issued a reboot ",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe passed when it was expected to"
                                    " fail")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        # Peer probing the node using hostname when it is still not online
        g.log.info("Peer probing node %s using hostname which is still "
                   "not online ",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when node "
                                    "has not come online")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        ret, _, _ = reboot.async_communicate()
        self.assertEqual(ret, 255, "reboot failed")

        # Validate if rebooted node is online or not
        count = 0
        while count < 40:
            sleep(15)
            ret, _ = are_nodes_online(self.servers[1])
            if ret:
                g.log.info("Node %s is online", self.servers[1])
                break
            count += 1
        self.assertTrue(ret, "Node in test not yet online")

        # check if glusterd is running post reboot
        ret = wait_for_glusterd_to_start(self.servers[1],
                                         glusterd_start_wait_timeout=120)
        self.assertTrue(ret, "Glusterd service is not running post reboot")

        # peer probe the node must pass
        g.log.info("peer probing node %s", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, "Peer probe has failed unexpectedly with "
                                 "%s " % err)

        # checking if core file created in "/", "/tmp" and "/var/log/core"
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "core file found")
    def test_rebalance_peer_probe(self):
        """
        Test case:
        1. Detach a peer
        2. Create a volume, start it and mount it
        3. Start creating a few files on mount point
        4. Collect arequal checksum on mount point pre-rebalance
        5. Expand the volume
        6. Start rebalance
        7. While rebalance is going, probe a peer and check if
           the peer was probed successfully
        8. Collect arequal checksum on mount point post-rebalance
           and compare with value from step 4
        """

        # Detach a peer
        ret, _, _ = peer_detach(self.mnode, self.servers[5])
        self.assertEqual(ret, 0, "Failed to detach peer %s" % self.servers[5])

        self.is_peer_detached = True

        # Start I/O from mount point and wait for it to complete
        cmd = ("cd %s; for i in {1..1000} ; do "
               "dd if=/dev/urandom of=file$i bs=10M count=1; done" %
               self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.first_client, cmd)
        self.assertEqual(ret, 0, "IO failed on volume %s" % self.volname)

        # Collect arequal checksum before rebalance
        arequal_checksum_before = collect_mounts_arequal(self.mounts[0])

        # Add brick to volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to add brick on volume %s" % self.volname)

        # Trigger rebalance and wait for it to complete
        ret, _, _ = rebalance_start(self.mnode, self.volname, force=True)
        self.assertEqual(
            ret, 0,
            "Failed to start rebalance on the volume %s" % self.volname)

        # Let rebalance run for a while
        sleep(5)

        # Add new node to the cluster
        ret = peer_probe_servers(self.mnode, self.servers[5])
        self.assertTrue(ret,
                        "Failed to peer probe server : %s" % self.servers[5])
        g.log.info(
            "Peer probe success for %s and all peers are in "
            "connected state", self.servers[5])

        self.is_peer_detached = False

        # Wait for rebalance to complete
        ret = wait_for_rebalance_to_complete(self.mnode,
                                             self.volname,
                                             timeout=1200)
        self.assertTrue(
            ret, "Rebalance is not yet complete on the volume "
            "%s" % self.volname)
        g.log.info("Rebalance successfully completed")

        # Collect arequal checksum after rebalance
        arequal_checksum_after = collect_mounts_arequal(self.mounts[0])

        # Check for data loss by comparing arequal before and after rebalance
        self.assertEqual(arequal_checksum_before, arequal_checksum_after,
                         "arequal checksum is NOT MATCHING")
        g.log.info("arequal checksum is SAME")
    def test_detach_node_used_to_mount(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1.Create a 1X3 volume with only 3 nodes from the cluster.
        2.Mount volume on client node using the ip of the fourth node.
        3.Write IOs to the volume.
        4.Detach node N4 from cluster.
        5.Create a new directory on the mount point.
        6.Create a few files using the same command used in step 3.
        7.Add three more bricks to make the volume
          2x3 using add-brick command.
        8.Do a gluster volume rebalance on the volume.
        9.Create more files from the client on the mount point.
        10.Check for files on bricks from both replica sets.
        11.Create a new directory from the client on the mount point.
        12.Check for directory in both replica sets.
        """

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")
        g.log.info("Volume %s created successfully", self.volname)

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.servers[4],
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully using %s", self.servers[4])

        # Creating 100 files.
        command = ('for number in `seq 1 100`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files created on mount point.")

        # Detach N4 from the list.
        ret, _, _ = peer_detach(self.mnode, self.servers[4])
        self.assertEqual(ret, 0, "Failed to detach %s" % self.servers[4])
        g.log.info("Peer detach successful %s", self.servers[4])

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir1",
                    parents=True)
        self.assertTrue(ret, ("Failed to create directory dir1."))
        g.log.info("Directory dir1 created successfully.")

        # Creating 100 files.
        command = ('for number in `seq 101 200`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files created on mount point.")

        # Forming brick list
        brick_list = form_bricks_list_to_add_brick(self.mnode, self.volname,
                                                   self.servers,
                                                   self.all_servers_info)

        # Adding bricks
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(ret, 0,
                         "Failed to add brick to the volume %s" % self.volname)
        g.log.info("Brick added successfully to the volume %s", self.volname)

        # Start rebalance for volume.
        g.log.info("Starting rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance "
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Creating 100 files.
        command = ('for number in `seq 201 300`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files created on mount point.")

        # Check for files on bricks.
        attempts = 10
        while attempts:
            number = str(randint(1, 300))
            for brick in brick_list:
                brick_server, brick_dir = brick.split(':')
                file_name = brick_dir + "/file" + number
                if file_exists(brick_server, file_name):
                    g.log.info("Check xattr"
                               " on host %s for file %s", brick_server,
                               file_name)
                    ret = get_fattr_list(brick_server, file_name)
                    self.assertTrue(ret,
                                    ("Failed to get xattr for %s" % file_name))
                    g.log.info("Got xattr for %s successfully", file_name)
            attempts -= 1

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir2")
        if not ret:
            attempts = 5
            while attempts:
                ret = mkdir(self.mounts[0].client_system,
                            self.mounts[0].mountpoint + "/dir2")
                if ret:
                    break
                attempts -= 1
        self.assertTrue(ret, ("Failed to create directory dir2."))
        g.log.info("Directory dir2 created successfully.")

        # Check for directory in both replica sets.
        for brick in brick_list:
            brick_server, brick_dir = brick.split(':')
            folder_name = brick_dir + "/dir2"
            if file_exists(brick_server, folder_name):
                g.log.info(
                    "Check trusted.glusterfs.dht"
                    " on host %s for directory %s", brick_server, folder_name)
                ret = get_fattr(brick_server, folder_name,
                                'trusted.glusterfs.dht')
                self.assertTrue(ret, ("Failed to get trusted.glusterfs.dht"
                                      " xattr for %s" % folder_name))
                g.log.info(
                    "Get trusted.glusterfs.dht xattr"
                    " for %s successfully", folder_name)