Example #1
    def _disable_io_encryption(self):
        """ Disables IO encryption """
        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint,
                                  mtype=self.mount_type)
        self.assertEqual(ret, 0, "Failed to Unmount volume")

        # Stop Volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to Stop volume")

        # Disable server and client SSL usage
        options = {"server.ssl": "off",
                   "client.ssl": "off"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to set volume options")

        # Start Volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to Start volume")

        # Mount Volume
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, "Failed to mount the volume back")
Example #2
    def _bring_bricks_online_and_monitor_heal(self, bricks):
        """Bring the bricks online and monitor heal until completion"""
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, 'Not able to force start volume')
        ret = monitor_heal_completion(self.mnode,
                                      self.volname,
                                      bricks=list(bricks))
        self.assertTrue(ret, 'Heal is not complete for {}'.format(bricks))
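A minimal usage sketch for the helper above, assuming choice (from random) and the brick_libs helpers get_all_bricks/bring_bricks_offline are imported in the same module, as they are in the other examples on this page; the test name and brick selection are illustrative only.

    def test_heal_after_single_brick_down(self):
        """Hypothetical sketch: take one brick offline, then reuse the
        helper above to bring it back and wait for self-heal."""
        bricks = [choice(get_all_bricks(self.mnode, self.volname))]
        ret = bring_bricks_offline(self.volname, bricks)
        self.assertTrue(ret, "Failed to bring brick %s offline" % bricks)

        # Force-start the volume and monitor heal on the affected brick
        self._bring_bricks_online_and_monitor_heal(bricks)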
    def test_rmdir_child_when_nonhash_vol_down(self):
        """
        Case 1:
        - create parent
        - bring down a non-hashed subvolume for directory child
        - create parent/child
        - rmdir /mnt/parent will fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, "mkdir failed")
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "parent", "child")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ("Error in bringing down subvolume %s"
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # Create child-dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # 'rmdir' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)
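
        # Illustrative addition, not in the original test: since the
        # docstring expects ENOTCONN, the error text can be checked too. The
        # "Transport endpoint is not connected" string assumed here is the
        # usual ENOTCONN message; the exact wording may vary by platform.
        ret, _, err = g.run(self.clients[0], "rmdir %s" % parent_dir)
        self.assertNotEqual(ret, 0, "rmdir unexpectedly succeeded")
        self.assertIn("Transport endpoint is not connected", err,
                      "rmdir did not fail with ENOTCONN: %s" % err)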

        # Cleanup
        # Bring up the subvol - restart volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
Example #4
    def test_gfind_when_brick_down(self):
        """
        Verifying the glusterfind functionality when a brick is down.

        1. Create a volume
        2. Create a session on the volume
        3. Create various files from mount point
        4. Bring down brick process on one of the node
        5. Perform glusterfind pre
        6. Perform glusterfind post
        7. Check the contents of outfile
        """

        # pylint: disable=too-many-statements
        # Create a session for the volume
        ret, _, _ = gfind_create(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Unexpected: Creation of a session for the "
                                  "volume %s failed" % self.volname))
        g.log.info("Successfully created a session for the volume %s",
                   self.volname)

        # Perform glusterfind list to check if session exists
        _, out, _ = gfind_list(self.mnode,
                               volname=self.volname,
                               sessname=self.session)
        self.assertNotEqual(out, "No sessions found.",
                            "Failed to list the glusterfind session")
        g.log.info("Successfully listed the glusterfind session")

        self._perform_io_and_validate_presence_of_files()

        # Wait for changelog to get updated
        sleep(2)

        # Bring one of the brick down.
        brick_list = get_all_bricks(self.mnode, self.volname)
        ret = bring_bricks_offline(self.volname, choice(brick_list))
        self.assertTrue(ret, "Failed to bring down the brick.")
        g.log.info("Succesfully brought down one brick.")

        self._perform_glusterfind_pre_and_validate_outfile()

        # Perform glusterfind post for the session
        ret, _, _ = gfind_post(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind post"))
        g.log.info("Successfully performed glusterfind post")

        # Bring the brick process up.
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Failed to start the volume.")
        g.log.info("Successfully started the volume.")
    def test_rmdir_parent_pre_nonhash_vol_down(self):
        """
        Case 4:
        - Bring down a non-hashed subvol for parent_dir
        - mkdir parent
        - rmdir parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "/", "parent")
        self.assertIsNotNone(nonhashed_subvol,
                             'Error in finding nonhashed subvol')
        g.log.info("nonhashed subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        parent_dir = self.mountpoint + '/parent'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # 'rmdir' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)

        # Cleanup
        # Bring up the subvol - restart volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
Example #6
    def test_rebalance_with_brick_down(self):
        """
        Rebalance with brick down in replica
        - Create a Replica volume.
        - Bring down one of the brick down in the replica pair
        - Do some IO and create files on the mount point
        - Add a pair of bricks to the volume
        - Initiate rebalance
        - Bring back the brick which was down.
        - After self heal happens, all the files should be present.
        """
        # Log the volume info and status before brick is down.
        log_volume_info_and_status(self.mnode, self.volname)

        # Bring one of the bricks offline
        brick_list = get_all_bricks(self.mnode, self.volname)
        ret = bring_bricks_offline(self.volname, choice(brick_list))
        self.assertTrue(ret, "Failed to bring the brick offline")

        # Log the volume info and status after brick is down.
        log_volume_info_and_status(self.mnode, self.volname)

        # Create files at mountpoint.
        cmd = (
            "/usr/bin/env python %s create_files "
            "-f 2000 --fixed-file-size 1k --base-file-name file %s"
            % (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(
            self.mounts[0].client_system, cmd, user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete.
        self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
                                                self.mounts[0]),
                        "IO failed on some of the clients")
        g.log.info("IO completed on the clients")

        # Compute the arequal checksum before bringing all bricks online
        arequal_before_all_bricks_online = collect_mounts_arequal(self.mounts)

        # Log the volume info and status before expanding volume.
        log_volume_info_and_status(self.mnode, self.volname)

        # Expand the volume.
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
        g.log.info("Expanding volume is successful on "
                   "volume %s", self.volname)

        # Log the volume info after expanding volume.
        log_volume_info_and_status(self.mnode, self.volname)

        # Start Rebalance.
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Wait for rebalance to complete
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Rebalance is successfully complete on the volume %s",
                   self.volname)

        # Log the volume info and status before bringing all bricks online
        log_volume_info_and_status(self.mnode, self.volname)

        # Bring all bricks online.
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Not able to start volume with force option")
        g.log.info("Volume start with force option successful.")

        # Log the volume info and status after bringing all bricks online
        log_volume_info_and_status(self.mnode, self.volname)

        # Monitor heal completion.
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "heal has not yet completed")
        g.log.info("Self heal completed")

        # Compute the arequal checksum after all bricks online.
        arequal_after_all_bricks_online = collect_mounts_arequal(self.mounts)

        # Comparing arequal checksum before and after the operations.
        self.assertEqual(arequal_before_all_bricks_online,
                         arequal_after_all_bricks_online,
                         "arequal checksum is NOT MATCHING")
        g.log.info("arequal checksum is SAME")
Example #7
    def test_ec_rename_files_with_brick_down(self):
        """
        Description: Test to check no errors on file/dir renames when one of
                        the bricks is down in the volume.
        Steps:
        1. Create an EC volume
        2. Mount the volume using FUSE on two different clients
        3. Create ~9 files from one of the client
        4. Create ~9 dir with ~9 files each from another client
        5. Create soft-links, hard-links for file{4..6}, file{7..9}
        6. Create soft-links for dir{4..6}
        7. Begin renaming the files, in multiple iterations
        8. Bring down a brick while renaming the files
        9. Bring the brick online after renaming some of the files
        10. Wait for renaming of the files
        11. Validate no data loss and files are renamed successfully
        """

        # Creating ~9 files from client 1 on mount
        m_point = self.mounts[0].mountpoint
        cmd = 'cd %s; touch file{1..9}_or' % m_point
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(
            ret, 0, "Fail: Not able to create files on "
            "{}".format(m_point))
        g.log.info("Files created successfully on mount point")

        # Creating 9 dir X 9 files in each dir from client 2
        cmd = ('cd %s; mkdir -p dir{1..9}_or; touch '
               'dir{1..9}_or/file{1..9}_or' % m_point)
        ret, _, _ = g.run(self.clients[1], cmd)
        self.assertEqual(
            ret, 0, "Fail: Not able to create dir with files on "
            "{}".format(m_point))
        g.log.info("Dirs with files are created successfully on mount point")

        # Create required soft links and hard links from client 1 on mount
        client, path = self.clients[0], m_point
        self.create_links(client, path)

        client = self.clients[1]
        for i in range(1, 10):

            # Create required soft and hard links in nested dirs
            path = '{}/dir{}_or'.format(m_point, i)
            self.create_links(client, path)

        # Create soft links for dirs
        path = m_point
        for i in range(4, 7):
            ret = create_link_file(client,
                                   '{}/dir{}_or'.format(path, i),
                                   '{}/dir{}_sl'.format(path, i),
                                   soft=True)
            self.assertTrue(
                ret, "Fail: Not able to create soft link for "
                "{}/dir{}_or".format(path, i))
        g.log.info("Created nested soft and hard links for files successfully")

        # Calculate the file count for each category: original, soft and
        # hard links
        cmd = ('cd %s; arr=(or sl hl); '
               'for i in ${arr[*]}; do find . -name "*$i" | wc -l ; '
               'done; ' % m_point)
        ret, out, _ = g.run(client, cmd)
        self.assertEqual(
            ret, 0, "Not able get list of soft and hard links "
            "created on the mount point")
        all_org, all_soft, all_hard = out.split()

        # Rename 2 out of 3 dir's soft links from client 1
        client = self.clients[0]
        cmd = ('cd %s; sl=0; '
               'for line in `ls -R | grep -P "dir(4|5)_sl"`; '
               'do mv -f "$line" "$line""_renamed"; ((sl++)); done; '
               'echo $sl;' % m_point)
        ret, out, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, "Not able to rename directory soft links")
        temp_soft = out.strip()

        # Start renaming original files from client 1 and
        # softlinks, hardlinks  from client 2
        cmd = ('cd %s; arr=(. dir{1..9}_or);  or=0; '
               'for item in ${arr[*]}; do '
               'cd $item; '
               'for line in `ls | grep -P "file(1|2)_or"`; '
               'do mv -f "$line" "$line""_renamed"; ((or++)); sleep 2; done;'
               'cd - > /dev/null; sleep 1; done; echo $or ' % m_point)
        proc_or = g.run_async(client, cmd)

        client = self.clients[1]
        cmd = ('cd %s; arr=(. dir{1..9}_or); sl=0; hl=0; '
               'for item in ${arr[*]}; do '
               'cd $item; '
               'for line in `ls | grep -P "file(4|5)_sl"`; '
               'do mv -f "$line" "$line""_renamed"; ((sl++)); sleep 1; done; '
               'for line in `ls | grep -P "file(7|8)_hl"`; '
               'do mv -f "$line" "$line""_renamed"; ((hl++)); sleep 1; done; '
               'cd - > /dev/null; sleep 1; done; echo $sl $hl; ' % m_point)
        proc_sl_hl = g.run_async(client, cmd)

        # Wait for some files to be renamed
        sleep(20)

        # Kill one of the bricks
        brick_list = get_all_bricks(self.mnode, self.volname)
        ret = bring_bricks_offline(self.volname, choice(brick_list))
        self.assertTrue(ret, "Failed to bring one of the bricks offline")

        # Wait for some more files to be renamed
        sleep(20)

        # Bring brick online
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Not able to start Volume with force option")

        # Wait for rename to complete and take count of file operations
        ret, out, _ = proc_or.async_communicate()
        self.assertEqual(ret, 0, "Fail: Origianl files are not renamed")
        ren_org = out.strip()

        ret, out, _ = proc_sl_hl.async_communicate()
        self.assertEqual(ret, 0, "Fail: Soft and Hard links are not renamed")
        ren_soft, ren_hard = out.strip().split()
        ren_soft = str(int(ren_soft) + int(temp_soft))

        # Count actual data of renaming links/files
        cmd = ('cd %s; arr=(or or_renamed sl sl_renamed hl hl_renamed); '
               'for i in ${arr[*]}; do find . -name "*$i" | wc -l ; '
               'done; ' % m_point)
        ret, out, _ = g.run(client, cmd)
        self.assertEqual(
            ret, 0, "Not able to get count of original and link "
            "files after brick was brought up")
        (act_org, act_org_ren, act_soft, act_soft_ren, act_hard,
         act_hard_ren) = out.split()

        # Validate count of expected and actual rename of
        # links/files is matching
        for exp, act, msg in ((ren_org, act_org_ren, 'original'),
                              (ren_soft, act_soft_ren, 'soft links'),
                              (ren_hard, act_hard_ren, 'hard links')):
            self.assertEqual(
                exp, act, "Count of {} files renamed while brick "
                "was offline is not matching".format(msg))

        # Validate no data is lost in rename process
        for exp, act, msg in (
            (int(all_org) - int(act_org_ren), int(act_org), 'original'),
            (int(all_soft) - int(act_soft_ren), int(act_soft), 'soft links'),
            (int(all_hard) - int(act_hard_ren), int(act_hard), 'hard links'),
        ):
            self.assertEqual(
                exp, act, "Count of {} files which are not "
                "renamed while brick was offline "
                "is not matching".format(msg))
    def test_rm_file_when_nonhash_vol_down(self):
        """
        Case 3:
        - create parent
        - mkdir parent/child
        - touch parent/child/file
        - bring down a subvol where the file is not present
        - rm -rf parent
            - Only file should be deleted
            - rm -rf of parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Find a non hashed subvolume(or brick)
        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Create child dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # Create a file under child_dir
        file_one = child_dir + '/file_one'
        ret, _, err = g.run(self.clients[0], ("touch %s" % file_one))
        self.assertFalse(ret, ('touch failed for %s err: %s' %
                               (file_one, err)))

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "parent/child",
                                                        "file_one")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rm -rf' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected"
                   " with err %s", parent_dir, err)

        brickobject = create_brickobjectlist(self.subvols, "parent/child")
        self.assertIsNotNone(brickobject,
                             "could not create brickobject list")
        # Make sure file_one is deleted
        for brickdir in brickobject:
            dir_path = "%s/parent/child/file_one" % brickdir.path
            brick_path = dir_path.split(":")
            self.assertFalse(file_exists(brickdir._host, brick_path[1]),
                             ('Expected file %s not to exist on servers'
                              % file_one))
        g.log.info("file is deleted as expected")

        # Cleanup
        # Bring up the subvol - restart volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
    def test_rmdir_dir_when_hash_nonhash_vol_down(self):
        """
        Case 2:
        - create dir1 and dir2
        - bring down hashed subvol for dir1
        - bring down a non-hashed subvol for dir2
        - rmdir dir1 should fail with ENOTCONN
        - rmdir dir2 should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create dir1 and dir2
        directory_list = []
        for number in range(1, 3):
            directory_list.append('{}/dir{}'.format(self.mountpoint, number))
            ret = mkdir(self.clients[0], directory_list[-1])
            self.assertTrue(ret, ('mkdir failed for %s '
                                  % directory_list[-1]))
            g.log.info("mkdir of directory %s successful",
                       directory_list[-1])

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols, "/",
                                                        "dir1")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir1 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[0])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[0]))
        g.log.info("rmdir of directory %s failed as expected",
                   directory_list[0])

        # Bring up the subvol - restart volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Unmount and mount the volume back to trigger heal
        ret, _, err = umount_volume(self.clients[1], self.mountpoint)
        self.assertFalse(ret, "Error in unmounting the volume: %s" % err)

        ret, _, err = mount_volume(self.volname,
                                   mtype='glusterfs',
                                   mpoint=self.mountpoint,
                                   mserver=self.servers[0],
                                   mclient=self.clients[1])
        self.assertFalse(ret, "Error in creating temp mount")

        ret, _, _ = g.run(self.clients[1], ("ls %s/dir1" % self.mountpoint))
        self.assertEqual(ret, 0, "Error in lookup for dir1")
        g.log.info("lookup successful for dir1")

        # This confirms that healing is done on dir1
        ret = validate_files_in_dir(self.clients[0],
                                    directory_list[0],
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "validate_files_in_dir for dir1 failed")
        g.log.info("healing successful for dir1")

        # Bring down the hashed subvol
        # Find a hashed subvolume(or brick)
        hashed_subvol, count = find_hashed_subvol(self.subvols, "/", "dir2")
        self.assertIsNotNone(hashed_subvol,
                             "Error in finding hashed subvol")
        g.log.info("hashed_subvol %s", hashed_subvol._host)

        # Bring hashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir2 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[1])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[1]))
        g.log.info("rmdir of dir2 directory %s failed as expected",
                   directory_list[1])

        # Cleanup
        # Bring up the subvol - restart the volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Delete dirs
        for directory in directory_list:
            ret = rmdir(self.clients[0], directory)
            self.assertTrue(ret, ('rmdir failed for %s ' % directory))
            g.log.info("rmdir of directory %s successful", directory)
    def test_rebalance_start_not_fail(self):
        """
        1. On node N1, add "transport.socket.bind-address N1" to
            /etc/glusterfs/glusterd.vol
        2. Create a replicate (1X3) and disperse (4+2) volumes with
            name more than 108 chars
        3. Mount both volumes using node 1, where you added the
            "transport.socket.bind-address", and start IO (like untar)
        4. Perform add-brick of 3 bricks on the replicate volume
        5. Start rebalance on the replicate volume
        6. Perform add-brick of 6 bricks on the disperse volume
        7. Start rebalance on the disperse volume
        """
        cmd = ("sed -i 's/end-volume/option "
               "transport.socket.bind-address {}\\n&/g' "
               "/etc/glusterfs/glusterd.vol".format(self.mnode))
        disperse = ("disperse_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
                    "ffhrlk40jtnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjkshoi")
        replicate = ("replicate_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
                     "ffhrlk40tnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjskahn")

        volnames = (disperse, replicate)
        for volume, vol_name in (("disperse", disperse), ("replicate",
                                                          replicate)):

            bricks_list = form_bricks_list(self.mnode, volume,
                                           6 if volume == "disperse" else 3,
                                           self.servers, self.all_servers_info)
            if volume == "replicate":
                ret, _, _ = volume_create(self.mnode,
                                          replicate,
                                          bricks_list,
                                          replica_count=3)

            else:
                ret, _, _ = volume_create(self.mnode,
                                          disperse,
                                          bricks_list,
                                          force=True,
                                          disperse_count=6,
                                          redundancy_count=2)

            self.assertFalse(
                ret, "Unexpected: Volume create '{}' failed ".format(vol_name))
            ret, _, _ = volume_start(self.mnode, vol_name)
            self.assertFalse(ret, "Failed to start volume")

        # Add entry in 'glusterd.vol'
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertFalse(ret, "Failed to add entry in 'glusterd.vol' file")

        self.list_of_io_processes = []

        # mount volume
        self.mount = ("/mnt/replicated_mount", "/mnt/disperse_mount")
        for mount_dir, volname in zip(self.mount, volnames):
            ret, _, _ = mount_volume(volname, "glusterfs", mount_dir,
                                     self.mnode, self.clients[0])
            self.assertFalse(
                ret, "Failed to mount the volume '{}'".format(mount_dir))

            # Run IO
            # Create a dir to start untar
            self.linux_untar_dir = "{}/{}".format(mount_dir, "linuxuntar")
            ret = mkdir(self.clients[0], self.linux_untar_dir)
            self.assertTrue(ret, "Failed to create dir linuxuntar for untar")

            # Start linux untar on dir linuxuntar
            ret = run_linux_untar(self.clients[:1],
                                  mount_dir,
                                  dirs=tuple(['linuxuntar']))
            self.list_of_io_processes += ret
            self.is_io_running = True

        # Add Brick to replicate Volume
        bricks_list = form_bricks_list(self.mnode, replicate, 3, self.servers,
                                       self.all_servers_info, "replicate")
        ret, _, _ = add_brick(self.mnode, replicate, bricks_list, force=True)
        self.assertFalse(ret, "Failed to add-brick '{}'".format(replicate))

        # Trigger Rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, replicate)
        self.assertFalse(
            ret,
            "Failed to start rebalance on the volume '{}'".format(replicate))

        # Add Brick to disperse Volume
        bricks_list = form_bricks_list(self.mnode, disperse, 6, self.servers,
                                       self.all_servers_info, "disperse")

        ret, _, _ = add_brick(self.mnode, disperse, bricks_list, force=True)
        self.assertFalse(ret, "Failed to add-brick '{}'".format(disperse))

        # Trigger Rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, disperse)
        self.assertFalse(
            ret, "Failed to start rebalance on the volume {}".format(disperse))

        # Check if Rebalance is completed on both the volume
        for volume in (replicate, disperse):
            ret = wait_for_rebalance_to_complete(self.mnode,
                                                 volume,
                                                 timeout=600)
            self.assertTrue(
                ret,
                "Rebalance is not Compleated on Volume '{}'".format(volume))