Code example #1
    def test_induce_holes_thenfixlayout(self):

        # pylint: disable=too-many-statements
        m_point = self.mounts[0].mountpoint
        command = 'mkdir -p ' + m_point + '/testdir'
        ret, _, _ = g.run(self.clients[0], command)
        self.assertEqual(ret, 0, "mkdir failed")
        g.log.info("mkdir is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before shrinking the volume.
        g.log.info("Logging volume info and Status before shrinking volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Shrinking volume by removing bricks
        g.log.info("Start removing bricks from volume")
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 self.remove_brick_list, "force")
        self.assertFalse(ret, "Remove-brick with force: FAIL")
        g.log.info("Remove-brick with force: PASS")

        # Check the layout
        ret = is_layout_complete(self.mnode, self.volname, dirpath='/testdir')
        self.assertFalse(ret, "Volume %s: Layout is complete")
        g.log.info("Volume %s: Layout has some holes")

        # Start Rebalance fix-layout
        g.log.info("Volume %s: Start fix-layout", self.volname)
        ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
        self.assertEqual(ret, 0, ("Volume %s: fix-layout start failed"
                                  "%s", self.volname))
        g.log.info("Volume %s: fix-layout start success", self.volname)

        # Wait for fix-layout to complete
        g.log.info("Waiting for fix-layout to complete")
        ret = wait_for_fix_layout_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s: Fix-layout is either failed or "
                              "in-progress", self.volname))
        g.log.info("Volume %s: Fix-layout completed successfully",
                   self.volname)

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")
Code example #2

    def test_rebalance_while_remove_brick_in_progress(self):
        """
        - Create directories and files on the mount point.
        - Now remove one of the bricks from the volume:
            gluster volume remove-brick <vol> <brick> start
        - Immediately start rebalance on the same volume:
            gluster volume rebalance <vol> start
        """
        # pylint: disable=too-many-statements
        # DHT Layout validation
        for mount in self.mounts:
            g.log.debug('Check DHT values %s:%s', mount.client_system,
                        mount.mountpoint)
            ret = validate_files_in_dir(self.clients[0], mount.mountpoint,
                                        test_type=LAYOUT_IS_COMPLETE,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "TEST_LAYOUT_IS_COMPLETE: FAILED")
            g.log.info("TEST_LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before shrinking the volume.
        g.log.info("Logging volume info and Status before shrinking volume")
        log_volume_info_and_status(self.mnode, self.volname)
        g.log.info("Successful in logging volume info and status of volume "
                   "%s", self.volname)

        # Form bricks list for Shrinking volume
        self.remove_brick_list = form_bricks_list_to_remove_brick(
            self.mnode, self.volname, subvol_name=1)
        self.assertIsNotNone(self.remove_brick_list, ("Volume %s: Failed to "
                                                      "form bricks list for "
                                                      "shrink", self.volname))
        g.log.info("Volume %s: Formed bricks list for shrink", self.volname)

        # Shrink volume by removing bricks with option start
        g.log.info("Start removing bricks for %s", self.volname)
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 self.remove_brick_list, "start")
        self.assertEqual(ret, 0, ("Volume %s: Remove-brick status failed",
                                  self.volname))
        g.log.info("Volume %s: Remove-brick start success ", self.volname)

        # Log remove-brick status
        g.log.info("Logging Remove-brick status")
        ret, out, err = remove_brick(self.mnode, self.volname,
                                     self.remove_brick_list, "status")
        self.assertEqual(ret, 0, ("Volume %s: Remove-brick status failed",
                                  self.volname))
        g.log.info("Volume %s: Remove-brick status", self.volname)
        g.log.info(out)

        # Start rebalance while volume shrink in-progress
        g.log.info("Volume %s: Start rebalance while volume shrink is "
                   "in-progress")
        _, _, err = rebalance_start(self.mnode, self.volname)
        self.assertIn("Either commit or stop the remove-brick task.", err,
                      "Rebalance started successfully while volume shrink"
                      " is in-progress")
        g.log.info("Failed to start rebalance while volume shrink is "
                   "in progress <EXPECTED>")
Code example #3
    def mkdir_post_hashdown(self, subvols, parent_dir):
        '''
        case -1:
        - bring down a subvol
        - create a directory so that it does not hash to down subvol
        - make sure stat is successful on the dir
        '''
        # pylint: disable=protected-access
        # pylint: disable=pointless-string-statement
        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(subvols, "/", "parent")
        if nonhashed_subvol is None:
            g.log.error('Error in finding nonhashed subvol for parent')
            return False

        # bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, subvols[count])
        if not ret:
            g.log.error('Error in bringing down subvolume %s', subvols[count])
            return False

        g.log.info('target subvol %s is offline', subvols[count])

        # create parent dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        if ret != 0:
            g.log.error('mkdir failed for %s err: %s', parent_dir, err)
            return False
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # this confirms both layout and stat of the directory
        ret = validate_files_in_dir(self.clients[0],
                                    parent_dir,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        if not ret:
            g.log.error('Layout is not complete for %s', parent_dir)
            return False
        g.log.info('Layout is complete')

        # bring up the subvol
        ret = bring_bricks_online(self.mnode,
                                  self.volname,
                                  subvols[count],
                                  bring_bricks_online_methods=None)
        if not ret:
            g.log.error("Error in bringing back subvol online")
            return False

        g.log.info('Subvol is back online')

        # delete parent_dir
        ret, _, err = g.run(self.clients[0], ("rmdir %s" % parent_dir))
        if ret != 0:
            g.log.error('rmdir failed for %s err: %s', parent_dir, err)
            return False
        g.log.info("rmdir of directory %s successful", parent_dir)

        return True
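
Since mkdir_post_hashdown returns a bool rather than asserting, a caller in a test method might look like the sketch below. The helper and key names here are assumptions: get_subvols() and its 'volume_subvols' key are taken from glustolibs.gluster.volume_libs.

    def test_mkdir_post_hashdown(self):
        # Assumed helper: get_subvols() returns a dict whose
        # 'volume_subvols' entry is a list of brick lists, one per subvol.
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        parent_dir = self.mounts[0].mountpoint + '/parent_dir'
        ret = self.mkdir_post_hashdown(subvols, parent_dir)
        self.assertTrue(ret, "mkdir post hash-down case failed")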
Code example #4
    def test_add_brick_while_remove_brick_is_in_progress(self):
        # DHT Layout and hash validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before shrinking the volume.
        g.log.info("Logging volume info and Status before shrinking volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Form bricks list for volume shrink
        self.remove_brick_list = form_bricks_list_to_remove_brick(
            self.mnode, self.volname, subvol_name=1)
        self.assertIsNotNone(self.remove_brick_list, ("Volume %s: Failed to "
                                                      "form bricks list for "
                                                      "shrink", self.volname))
        g.log.info("Volume %s: Formed bricks list for shrink", self.volname)

        # Shrink volume by removing bricks
        g.log.info("Start removing bricks from volume")
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 self.remove_brick_list, "start")
        self.assertEqual(ret, 0, ("Volume %s shrink failed ", self.volname))
        g.log.info("Volume %s shrink started ", self.volname)
        # Log remove-brick status
        g.log.info("Logging Remove-brick status")
        ret, out, err = remove_brick(self.mnode, self.volname,
                                     self.remove_brick_list, "status")
        self.assertEqual(ret, 0,
                         ("Remove-brick status failed on %s ", self.volname))
        g.log.info("Remove-brick status %s", self.volname)
        g.log.info(out)

        # Expanding volume while volume shrink is in-progress
        g.log.info("Volume %s: Expand volume while volume shrink in-progress",
                   self.volname)
        _, _, err = add_brick(self.mnode, self.volname, self.add_brick_list)
        self.assertIn(
            "rebalance is in progress", err, "Successfully added"
            "bricks to the volume <NOT EXPECTED>")
        g.log.info(
            "Volume %s: Failed to add-bricks while volume shrink "
            "in-progress <EXPECTED>", self.volname)

        # cleanup add-bricks list
        for brick in self.add_brick_list:
            brick_node, brick_path = brick.split(":")
            ret, _, _ = g.run(brick_node, ("rm -rf %s" % brick_path))
            if ret != 0:
                g.log.error("Failed to clean %s:%s", brick_node, brick_path)
        g.log.info("Successfully cleaned backend add-brick bricks list")
Code example #5
    def setUp(self):
        # Calling GlusterBaseClass setUp
        GlusterBaseClass.setUp.im_func(self)

        # Start IO on mounts
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for index, mount_obj in enumerate(self.mounts, start=1):
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 2 "
                   "--max-num-of-dirs 2 "
                   "--num-of-files 10 %s" %
                   (self.script_upload_path, index + 10, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        g.log.info("Wait for IO to complete")
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        if not ret:
            raise ExecutionError("IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        if not ret:
            raise ExecutionError("Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")
Code example #6
    def test_dir_change_perm_recursive(self):
        # pylint: disable=too-many-statements
        # Start IO on mounts
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for index, mount_obj in enumerate(self.mounts, start=1):
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 5 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, index + 10, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        g.log.info("Wait for IO to complete as IO validation did not "
                   "succeed in test method")
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on mount %s", self.clients[0])

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "layout is complete: FAILED")
        g.log.info("layout is complete: PASS")

        brick_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(brick_list, "Failed to get brick list")
        g.log.info("Successful in getting brick list %s", brick_list)

        mount_obj = self.mounts[0]
        cmd = ("find %s -mindepth 1 -maxdepth 1 -type d | "
               "xargs chown -R test_user1" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("Change user owner successfully for testdir on %s",
                   mount_obj.client_system)

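        # Assumption: compare_dir_structure's last argument selects what is
        # compared (0 = user owner, 1 = group owner, 2 = permission bits),
        # matching the chown/chgrp/chmod steps in this test.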
        retval = compare_dir_structure(mount_obj.client_system,
                                       mount_obj.mountpoint, brick_list, 0)
        self.assertTrue(
            retval, "Failed to compare user permission for all"
            " files/dir in mount directory with brick directory")
        g.log.info("User permission is same on mount and brick directory")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1"
               " -type d\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("directory is successfully accessed with different user")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1"
               " -type d | xargs chmod 777\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertNotEqual(rcode, 0, err)
        g.log.info("directory permission are not changed by different user")

        cmd = ("find %s -mindepth 1 -maxdepth 1 -type d | "
               "xargs chgrp -R test_user1" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("Change group owner successfully for testdir on %s",
                   mount_obj.client_system)

        retval = compare_dir_structure(mount_obj.client_system,
                                       mount_obj.mountpoint, brick_list, 1)
        self.assertTrue(
            retval, "Failed to compare group permission for all"
            " files/dir in mount directory with brick directory")
        g.log.info("Group permission is same on mount and brick directory")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1"
               " -type d\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("directory is successfully accessed with different user")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -type d "
               "| xargs chmod 777\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertNotEqual(rcode, 0, err)
        g.log.info("directory permission are not changed by different user")

        cmd = ("find %s -mindepth 1 -maxdepth 1 -type d | xargs chmod -R 777" %
               (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("Change permission 777 successfully for testdir on %s",
                   mount_obj.client_system)

        retval = compare_dir_structure(mount_obj.client_system,
                                       mount_obj.mountpoint, brick_list, 2)
        self.assertTrue(
            retval, "Failed to compare permission for all"
            " files/dir in mount directory with brick directory")
        g.log.info("Permission is same on mount and brick directory")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1"
               " -type d\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("directory is successfully accessed with different user")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1"
               " -type d | xargs chmod 666\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertNotEqual(rcode, 0, err)
        g.log.info("directory permission are not changed by different user")
Code example #7

    def test_healing_of_custom_xattrs_on_newly_added_bricks(self):
        """
        Description: Tests to check that the custom xattrs are healed on the
                     dirs when new bricks are added
        Steps :
        1) Create a volume.
        2) Mount the volume using FUSE.
        3) Create 100 directories on the mount point.
        4) Set the xattr on the directories.
        5) Add bricks to the volume and trigger rebalance.
        6) Check if all the bricks have healed.
        7) After rebalance completes, check the xattr for dirs on the newly
           added bricks.
        """
        # pylint: disable=too-many-function-args

        # Creating 100 directories on volume root
        m_point = self.mounts[0].mountpoint
        command = 'mkdir -p ' + m_point + '/dir{1..100}'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, ("Directory creation failed on %s",
                                  self.mounts[0].mountpoint))
        g.log.info("Directories created successfully.")

        # Lookup on the mount point
        command = 'ls ' + m_point + '/'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "ls failed on parent directory")
        g.log.info("ls on parent directory: successful")

        # Setting up the custom xattr for all the directories on mount point
        command = 'setfattr -n user.foo -v "foobar" ' + m_point + '/dir*'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "Failed to set the xattr on the"
                         " directories")
        g.log.info("Successfully set custom xattr on the directories")

        # Checking the layout of the directories on the back-end
        flag = validate_files_in_dir(self.clients[0],
                                     m_point,
                                     test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(flag, "Layout has some holes or overlaps")
        g.log.info("Layout is completely set")

        # Creating a list of directories on the mount point
        list_of_all_dirs = get_dir_contents(self.mounts[0].client_system,
                                            m_point)
        self.assertIsNotNone(list_of_all_dirs, "Creation of directory"
                             " list failed.")
        g.log.info("Creation of directory list is successful.")

        # Checking the custom xattr on backend bricks for the directories
        self.check_xattr(list_of_all_dirs)

        # Expanding volume by adding bricks to the volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Volume %s: Expand failed", self.volname))
        g.log.info("Volume %s: Expand success", self.volname)

        # Start Rebalance
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Volume %s: Failed to start rebalance",
                                  self.volname))
        g.log.info("Volume %s: Rebalance start success", self.volname)

        # Wait for rebalance to complete
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s: Rebalance failed to complete",
                              self.volname))
        g.log.info("Volume %s: Rebalance is completed", self.volname)

        # Lookup on the mount point
        command = 'ls -laR ' + m_point + '/'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "ls failed on parent directory")
        g.log.info("ls on parent directory: successful")

        # Check if all the bricks are healed
        ret = monitor_heal_completion(self.mnode, self.volname,
                                      timeout_period=900)
        self.assertTrue(ret, ("Heal is not complete for all bricks"))
        g.log.info("Healing is complete for all the bricks")

        # Checking the custom xattrs for all the directories on
        # back end bricks after rebalance is complete
        self.check_xattr(list_of_all_dirs)
Code example #8
    def test_rebalance_stop_with_large_file(self):
        """
        Testcase Steps:
        1. Create and start a volume.
        2. Mount volume on client and create a large file.
        3. Add bricks to the volume and check layout
        4. Rename the file such that it hashes to a different
           subvol.
        5. Start rebalance on volume.
        6. Stop rebalance on volume.
        """
        # Create file BIG1.
        command = ("dd if=/dev/urandom of={}/BIG1 bs=1024K count=10000".format(
            self.mounts[0].mountpoint))
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "Unable to create file I/O failed")
        g.log.info('Successfully created file BIG1.')

        # Checking if file created on correct subvol or not.
        ret = validate_files_in_dir(
            self.mounts[0].client_system,
            self.mounts[0].mountpoint,
            file_type=k.FILETYPE_FILES,
            test_type=k.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
        self.assertTrue(ret, "Files not created on correct subvol.")
        g.log.info("File BIG1 is on correct subvol according to "
                   "the hash value")

        # Adding brick to volume
        add_brick_list = form_bricks_list_to_add_brick(self.mnode,
                                                       self.volname,
                                                       self.servers,
                                                       self.all_servers_info)
        ret, _, _ = add_brick(self.mnode, self.volname, add_brick_list)
        self.assertEqual(ret, 0, "Unable to add bricks to volume")
        g.log.info("Successfully added bricks to volume.")

        # Check if brick is added successfully or not.
        current_bricks = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(
            current_bricks, "Unable to get "
            "current active bricks of volume")
        g.log.info("Successfully got active bricks of volume.")
        for brick in add_brick_list:
            self.assertIn(brick, current_bricks,
                          ("Brick %s is not added to volume" % brick))

        # Create directory testdir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + '/testdir')
        self.assertTrue(ret, "Failed to create testdir directory")
        g.log.info("Successfuly created testdir directory.")

        # Layout should be set on the new brick and should be
        # continuous and complete
        ret = validate_files_in_dir(self.mounts[0].client_system,
                                    self.mounts[0].mountpoint + '/testdir',
                                    test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(ret, "Layout not set for the new subvol")
        g.log.info("New subvol has been added successfully")

        # Rename file so that it gets hashed to different subvol
        file_index = 0
        path_info_dict = get_pathinfo(self.mounts[0].client_system,
                                      self.mounts[0].mountpoint + '/BIG1')
        initial_brick_set = path_info_dict['brickdir_paths']

        while True:
            # Calculate old_filename and new_filename and rename.
            file_index += 1
            old_filename = "{}/BIG{}".format(self.mounts[0].mountpoint,
                                             file_index)
            new_filename = "{}/BIG{}".format(self.mounts[0].mountpoint,
                                             (file_index + 1))
            ret, _, _ = g.run(self.mounts[0].client_system,
                              "mv {} {}".format(old_filename, new_filename))
            self.assertEqual(ret, 0, "Rename not successful")

            # Checking if it was moved to new subvol or not.
            path_info_dict = get_pathinfo(
                self.mounts[0].client_system,
                self.mounts[0].mountpoint + '/BIG%d' % (file_index + 1))
            if path_info_dict['brickdir_paths'] != initial_brick_set:
                break
        g.log.info("file renamed successfully")

        # Start rebalance on volume
        ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=False)
        self.assertEqual(ret, 0, "Rebalance did not start")
        g.log.info("Rebalance started successfully on volume %s", self.volname)

        # Stop rebalance on volume
        ret, _, _ = rebalance_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Rebalance stop command did not execute.")
        g.log.info("Rebalance stopped successfully on volume %s", self.volname)

        # Get rebalance status in xml
        command = ("gluster volume rebalance {} status --xml".format(
            self.volname))
        ret, _, _ = g.run(self.mnode, command)
        self.assertEqual(
            ret, 1, "Unexpected: Rebalance still running "
            "even after stop.")
        g.log.info("Rebalance is not running after stop.")
Code example #9

    def test_induce_holes_then_lookup(self):

        # pylint: disable=too-many-statements
        m_point = self.mounts[0].mountpoint
        command = 'mkdir -p ' + m_point + '/testdir'
        ret, _, _ = g.run(self.clients[0], command)
        self.assertEqual(ret, 0, "mkdir failed")
        g.log.info("mkdir is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before shrinking the volume.
        g.log.info("Logging volume info and Status before shrinking volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Shrinking volume by removing bricks
        g.log.info("Start removing bricks from volume")
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 self.remove_brick_list, "force")
        self.assertFalse(ret, "Remove-brick with force: FAIL")
        g.log.info("Remove-brick with force: PASS")

        # Check the layout
        dirpath = '/testdir'
        ret = is_layout_complete(self.mnode, self.volname, dirpath)
        self.assertFalse(ret, "Volume %s: Layout is complete")
        g.log.info("Volume %s: Layout has some holes")

        # Mount the volume on a new mount point
        mount_point = tempfile.mkdtemp()
        ret, _, _ = mount_volume(self.volname,
                                 mtype='glusterfs',
                                 mpoint=mount_point,
                                 mserver=self.mnode,
                                 mclient=self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to do gluster mount on volume %s", self.volname))
        g.log.info("Volume %s: mount success", self.mnode)

        # Send a look up on the directory
        cmd = 'ls %s%s' % (mount_point, dirpath)
        ret, _, err = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0,
                         ("Lookup failed on %s with error %s", dirpath, err))
        g.log.info("Lookup sent successfully on %s", dirpath)

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")
Code example #10
    def test_dir_change_perm(self):
        # pylint: disable=too-many-statements
        # Start IO on mounts
        g.log.info("Starting IO on all mounts...")
        mount_obj = self.mounts[0]
        cmd = ('cd %s ; mkdir testdir; '
               'mkdir -p testdir/dir{1..10}; '
               'touch testdir/file{1..10}') % (mount_obj.mountpoint)
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("IO is successful on mount %s", self.clients[0])

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(mount_obj)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "layout is complete: FAILED")
        g.log.info("layout is complete: PASS")

        brick_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(brick_list, "Failed to get brick list")
        g.log.info("Successful in getting brick list %s", brick_list)

        cmd = ("find %s -mindepth 1 -maxdepth 1 -type d | "
               "xargs chown test_user1" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("Change user owner successfully for testdir on %s",
                   mount_obj.client_system)

        retval = compare_dir_structure(mount_obj.client_system,
                                       mount_obj.mountpoint, brick_list, 0)
        self.assertTrue(
            retval, "Failed to compare user permission for all"
            " files/dir in mount directory with brick directory")
        g.log.info("User permission is same on mount and brick directory")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -maxdepth 1"
               " -type d\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("directory is successfully accessed with different user")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -maxdepth 1"
               " -type d | xargs chmod 777\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertNotEqual(rcode, 0, err)
        g.log.info("directory permission are not changed by different user")

        cmd = ("find %s -mindepth 1 -maxdepth 1 -type d | "
               "xargs chgrp test_user1" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("Change group owner successfully for testdir on %s",
                   mount_obj.client_system)

        retval = compare_dir_structure(mount_obj.client_system,
                                       mount_obj.mountpoint, brick_list, 1)
        self.assertTrue(
            retval, "Failed to compare group permission for all"
            " files/dir in mount directory with brick directory")
        g.log.info("Group permission is same on mount and brick directory")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -maxdepth 1"
               " -type d\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("directory is successfully accessed with different user")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -maxdepth 1 -type d "
               "| xargs chmod 777\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertNotEqual(rcode, 0, err)
        g.log.info("directory permission are not changed by different user")

        cmd = ("find %s -mindepth 1 -maxdepth 1 -type d | xargs chmod 777" %
               (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("Change permission 777 successfully for testdir on %s",
                   mount_obj.client_system)

        retval = compare_dir_structure(mount_obj.client_system,
                                       mount_obj.mountpoint, brick_list, 2)
        self.assertTrue(
            retval, "Failed to compare permission for all"
            " files/dir in mount directory with brick directory")
        g.log.info("Permission is same on mount and brick directory")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -maxdepth 1"
               " -type d\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertEqual(rcode, 0, err)
        g.log.info("directory is successfully accessed with different user")

        cmd = ("su -l test_user2 -c \"find %s -mindepth 1 -maxdepth 1"
               " -type d | xargs chmod 666\"" % (mount_obj.mountpoint))
        rcode, _, err = g.run(mount_obj.client_system, cmd)
        self.assertNotEqual(rcode, 0, err)
        g.log.info("directory permission are not changed by different user")
Code example #11
    def test_dht_custom_xattr(self):
        """
        Test case:
        1.Create a gluster volume and start it.
        2.Create file and link files.
        3.Create a custom xattr for file.
        4.Verify that xattr for file is displayed on
          mount point and bricks
        5.Modify custom xattr value and verify that xattr
          for file is displayed on mount point and bricks
        6.Verify that custom xattr is not displayed
          once you remove it
        7.Create a custom xattr for symbolic link.
        8.Verify that xattr for symbolic link
          is displayed on mount point and sub-volume
        9.Modify custom xattr value and verify that
          xattr for symbolic link is displayed on
          mount point and bricks
        10.Verify that custom xattr is not
           displayed once you remove it.
        """
        # Initializing variables
        mount_point = self.mounts[0].mountpoint
        self.client_node = self.mounts[0].client_system
        self.list_of_files, list_of_softlinks = [], []
        list_of_hardlinks = []

        for number in range(1, 3):

            # Create regular files
            fname = '{0}/regular_file_{1}'.format(mount_point, str(number))
            ret = append_string_to_file(self.client_node, fname,
                                        'Sample content for file.')
            self.assertTrue(
                ret, "Unable to create regular file "
                "{}".format(fname))
            self.list_of_files.append(fname)

            # Create hard link for file
            hardlink = '{0}/link_file_{1}'.format(mount_point, str(number))
            ret = create_link_file(self.client_node, fname, hardlink)
            self.assertTrue(
                ret, "Unable to create hard link file "
                "{}".format(hardlink))
            list_of_hardlinks.append(hardlink)

            # Create soft link for file
            softlink = '{0}/symlink_file_{1}'.format(mount_point, str(number))
            ret = create_link_file(self.client_node,
                                   fname,
                                   softlink,
                                   soft=True)
            self.assertTrue(
                ret, "Unable to create symlink file "
                "{}".format(softlink))
            list_of_softlinks.append(softlink)

        self.files_and_soft_links = self.list_of_files + list_of_softlinks

        # Check if files are created on the right subvol
        ret = validate_files_in_dir(
            self.client_node,
            mount_point,
            file_type=k.FILETYPE_FILES,
            test_type=k.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
        self.assertTrue(ret, "Files not created on correct sub-vols")
        g.log.info("Files are on correct sub-vols according to "
                   "the hash value")

        # Set custom xattr on all the regular files
        self.set_xattr_user_foo(self.list_of_files, 'bar2')

        # Check if custom xattr is set to all the regular files
        self.check_custom_xattr_visible('bar2')

        # Change the custom xattr on all the regular files
        self.set_xattr_user_foo(self.list_of_files, 'ABC')

        # Check if xattr is set to all the regular files
        self.check_custom_xattr_visible('ABC')

        # Delete Custom xattr from all regular files
        self.delete_xattr_user_foo(self.list_of_files)

        # Check mount point and brick for the xattr
        list_of_all_files = list_of_hardlinks + self.files_and_soft_links
        self.check_mount_point_and_bricks_for_xattr(list_of_all_files)

        # Check if pathinfo xattr is visible
        self.check_for_trusted_glusterfs_pathinfo(self.list_of_files)

        # Set custom xattr on all the soft links
        self.set_xattr_user_foo(list_of_softlinks, 'bar2')

        # Check if custom xattr is set on all the soft links
        self.check_custom_xattr_visible('bar2')

        # Change the custom xattr on all the soft links
        self.set_xattr_user_foo(list_of_softlinks, 'ABC')

        # Check if xattr is set on all the soft links
        self.check_custom_xattr_visible('ABC')

        # Delete Custom xattr from all soft links
        self.delete_xattr_user_foo(list_of_softlinks)

        # Check mount point and brick for the xattr
        self.check_mount_point_and_bricks_for_xattr(list_of_all_files)

        # Check if pathinfo xattr is visible
        self.check_for_trusted_glusterfs_pathinfo(list_of_softlinks)
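
The helpers used above (set_xattr_user_foo, check_custom_xattr_visible, delete_xattr_user_foo) are defined elsewhere in the test class; a minimal sketch of the setfattr/getfattr calls such helpers presumably wrap:

def set_user_foo(client, fqpath, value):
    """Set the user.foo xattr, as the test does on files and soft links."""
    ret, _, _ = g.run(client, 'setfattr -n user.foo -v %s %s'
                      % (value, fqpath))
    return ret == 0

def get_user_foo(client, fqpath):
    """Read user.foo back; None means the xattr is absent."""
    ret, out, _ = g.run(client, 'getfattr -n user.foo --only-values %s'
                        % fqpath)
    return out if ret == 0 else None

def delete_user_foo(client, fqpath):
    """Remove the user.foo xattr with setfattr -x."""
    ret, _, _ = g.run(client, 'setfattr -x user.foo %s' % fqpath)
    return ret == 0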
Code example #12

    def test_remove_brick_while_rebalance_is_running(self):

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s",
                    self.clients[0], self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0], self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before expanding the volume.
        g.log.info("Logging volume info and Status before expanding volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Expanding volume by adding bricks to the volume
        g.log.info("Start adding bricks to volume")
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Volume %s: Expand failed", self.volname))
        g.log.info("Volume %s: Expand successful", self.volname)

        # Wait for gluster processes to come online
        g.log.info("Wait for gluster processes to come online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s: one or more volume process are "
                              "not up", self.volname))
        g.log.info("All volume %s processes are online", self.volname)

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Verify volume's all process are online
        g.log.info("Volume %s: Verifying that all process are online",
                   self.volname)
        ret = verify_all_process_of_volume_are_online(self.mnode,
                                                      self.volname)
        self.assertTrue(ret, ("Volume %s : All process are not online ",
                              self.volname))
        g.log.info("Volume %s: All process are online", self.volname)

        # Start Rebalance
        g.log.info("Starting rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Volume %s: Failed to start rebalance",
                                  self.volname))
        g.log.info("Volume %s: Rebalance started ", self.volname)

        # Check if rebalance is running
        status_info = get_rebalance_status(self.mnode, self.volname)
        status = status_info['aggregate']['statusStr']
        if 'in progress' in status:
            # Shrinking volume by removing bricks
            g.log.info("Start removing bricks from volume")
            _, _, err = remove_brick(self.mnode, self.volname,
                                     self.remove_brick_list, "start")
            self.assertIn("Rebalance is in progress", err, "Successfully "
                          "removed bricks while volume rebalance is "
                          "in-progress")
            g.log.info("Failed to start remove-brick as rebalance is "
                       "in-progress")
        else:
            g.log.error("Rebalance process is not running")
            raise ExecutionError("Rebalance process is not running")
Code example #13
    def test_create_link_for_directory(self):

        m_point = self.mounts[0].mountpoint
        fqpath_for_test_dir = m_point + '/test_dir'
        flag = mkdir(self.clients[0], fqpath_for_test_dir, True)
        self.assertTrue(flag, "Failed to create a directory")
        fqpath = m_point + '/test_dir/dir{1..3}'
        flag = mkdir(self.clients[0], fqpath, True)
        self.assertTrue(flag, "Failed to create sub directories")
        flag = validate_files_in_dir(self.clients[0],
                                     fqpath_for_test_dir,
                                     test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(flag, "Layout of test directory is not complete")
        g.log.info("Layout for directory is complete")

        sym_link_path = m_point + '/' + 'test_sym_link'
        command = 'ln -s ' + fqpath_for_test_dir + ' ' + sym_link_path
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "Failed to create symlink for test_dir")

        command = 'stat ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "Stat command didn't return the details "
                         "correctly")
        self.assertIn('symbolic link', out,
                      "The type of the link is not symbolic")
        g.log.info("The link is symbolic")
        self.assertTrue(search(fqpath_for_test_dir, out),
                        "sym link does not point to correct location")
        g.log.info("sym link points to right directory")
        g.log.info("The details of the symlink are correct")

        command = 'ls -id ' + fqpath_for_test_dir + ' ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "Inode numbers not retrieved by the "
                         "ls command")
        list_of_inode_numbers = out.split('\n')
        flag = True
        if (list_of_inode_numbers[0].split(' ')[0] ==
                list_of_inode_numbers[1].split(' ')[0]):
            flag = False
        self.assertTrue(
            flag, "The inode numbers of the dir and sym link "
            "are same")
        g.log.info("Verified: inode numbers of the test_dir "
                   "and its sym link are different")

        command = 'ls ' + sym_link_path
        ret, out1, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "Failed to list the contents using the "
                         "sym link")
        command = 'ls ' + fqpath_for_test_dir
        ret, out2, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(
            ret, 0, "Failed to list the contents of the "
            "test_dir using ls command")
        self.assertEqual(out1, out2,
                         "The contents listed using the sym link "
                         "are not the same")
        g.log.info("The contents listed using the symlink are"
                   " the same as that of the test_dir")

        command = 'getfattr -d -m . -e hex ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to retrieve xattrs")
        list_xattrs = ['trusted.gfid', 'trusted.glusterfs.dht']
        flag = True
        for xattr in list_xattrs:
            if xattr in out:
                flag = False
        self.assertTrue(
            flag, "Important xattrs are being compromised"
            " using the symlink at the mount point")
        g.log.info("Verified: mount point doesn't display important "
                   "xattrs using the symlink")

        path_info_1 = get_pathinfo(self.mounts[0].client_system,
                                   fqpath_for_test_dir)
        path_info_2 = get_pathinfo(self.mounts[0].client_system, sym_link_path)
        self.assertEqual(path_info_1, path_info_2,
                         "Pathinfos for test_dir and its sym link "
                         "are not same")
        g.log.info("Pathinfos for test_dir and its sym link are same")

        command = 'readlink ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "readlink command returned an error")
        self.assertEqual(out.rstrip(), fqpath_for_test_dir,
                         "readlink did not return the path of the "
                         "test_dir")
        g.log.info("readlink successfully returned the path of the test_dir")
Code example #14
    def test_rmdir_dir_when_hash_nonhash_vol_down(self):
        """
        case -2:
        - create dir1 and dir2
        - bring down hashed subvol for dir1
        - bring down a non-hashed subvol for dir2
        - rmdir dir1 should fail with ENOTCONN
        - rmdir dir2 should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create dir1 and dir2
        directory_list = []
        for number in range(1, 3):
            directory_list.append('{}/dir{}'.format(self.mountpoint, number))
            ret = mkdir(self.clients[0], directory_list[-1])
            self.assertTrue(ret, ('mkdir failed for %s '
                                  % directory_list[-1]))
            g.log.info("mkdir of directory %s successful",
                       directory_list[-1])

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols, "/",
                                                        "dir1")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir1 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[0])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[0]))
        g.log.info("rmdir of directory %s failed as expected",
                   directory_list[0])

        # Bring up the subvol - restart volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Unmounting and Mounting the volume back to Heal
        ret, _, err = umount_volume(self.clients[1], self.mountpoint)
        self.assertFalse(ret, "Error in creating temp mount %s" % err)

        ret, _, err = mount_volume(self.volname,
                                   mtype='glusterfs',
                                   mpoint=self.mountpoint,
                                   mserver=self.servers[0],
                                   mclient=self.clients[1])
        self.assertFalse(ret, "Error in creating temp mount")

        ret, _, _ = g.run(self.clients[1], ("ls %s/dir1" % self.mountpoint))
        self.assertEqual(ret, 0, "Error in lookup for dir1")
        g.log.info("lookup successful for dir1")

        # This confirms that healing is done on dir1
        ret = validate_files_in_dir(self.clients[0],
                                    directory_list[0],
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "validate_files_in_dir for dir1 failed")
        g.log.info("healing successful for dir1")

        # Bring down the hashed subvol
        # Find a hashed subvolume(or brick)
        hashed_subvol, count = find_hashed_subvol(self.subvols, "/", "dir2")
        self.assertIsNotNone(hashed_subvol,
                             "Error in finding hashed subvol for dir2")
        g.log.info("hashed_subvol %s", hashed_subvol._host)

        # Bring hashed_subbvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir2 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[1])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[1]))
        g.log.info("rmdir of dir2 directory %s failed as expected",
                   directory_list[1])

        # Cleanup
        # Bring up the subvol - restart the volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Delete dirs
        for directory in directory_list:
            ret = rmdir(self.clients[0], directory)
            self.assertTrue(ret, ('rmdir failed for %s ' % directory))
            g.log.info("rmdir of directory %s successful", directory)
Code example #15
    def copy_dir(self):
        """
        Description:
        This test creates a parent directory and subdirectories
        at mount point. After that it creates a copy of parent
        directory at mount point, first when destination
        directory is not there, and second sub-test creates a
        copy after creating destination directory for copying.
        In the first test, contents will be copied from one
        directory to another but in the second test case, entire
        directory will be copied to another directory along with
        the contents. Then it checks for correctness of layout
        and content of source and copied directory at all
        sub-vols.
        """

        g.log.info("creating multiple,multilevel directories")
        m_point = self.mounts[0].mountpoint
        fqpath = m_point + '/root_dir/test_dir{1..3}'
        client_ip = self.clients[0]
        flag = mkdir(client_ip, fqpath, True)
        self.assertTrue(flag, "Directory creation: failed")

        command = 'ls ' + m_point + '/root_dir'
        ret, out, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "can't list the created directories")

        list_of_created_dirs = out.split('\n')
        flag = True
        for x_count in range(3):
            dir_name = 'test_dir%d' % (x_count + 1)
            if dir_name not in list_of_created_dirs:
                flag = False
        self.assertTrue(flag, "ls command didn't list all the directories")
        g.log.info("creation of multiple,multilevel directories created")

        g.log.info("creating files at different directory levels")
        command = 'touch ' + m_point + '/root_dir/test_file{1..5}'
        ret, _, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "files not created")

        command = 'ls ' + m_point + '/root_dir'
        ret, out, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "can't list the created directories")

        list_of_files_and_dirs = out.split('\n')
        flag = True
        for x_count in range(3):
            dir_name = 'test_dir%d' % (x_count + 1)
            if dir_name not in list_of_files_and_dirs:
                flag = False
        for x_count in range(5):
            file_name = 'test_file%d' % (x_count + 1)
            if file_name not in list_of_files_and_dirs:
                flag = False
        self.assertTrue(
            flag, "ls command didn't list all the directories and files")
        g.log.info("creation of files at multiple levels successful")

        if not self.destination_exists:
            destination_dir = 'root_dir_1'
        else:
            fqpath = m_point + '/new_dir'
            flag = mkdir(client_ip, fqpath, True)
            self.assertTrue(flag, "new_dir not created")
            destination_dir = 'new_dir/root_dir'

        g.log.info("performing layout checks for root_dir")
        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/root_dir',
                                     const.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
        self.assertTrue(flag, "root directory not present on every brick")

        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/root_dir',
                                     test_type=(
                                         const.TEST_LAYOUT_IS_COMPLETE))
        self.assertTrue(flag, "layout of every directory is complete")
        g.log.info("every directory is present on every brick and layout "
                   "of each brick is correct")

        g.log.info("copying root_dir at the mount point")
        command = "cp -r " + m_point + '/root_dir ' + m_point \
            + '/' + destination_dir
        ret, out, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "directory was not copied")

        g.log.info("performing layout checks for copied directory")

        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/' + destination_dir,
                                     const.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
        self.assertTrue(flag, "directories not present on every brick")

        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/' + destination_dir,
                                     test_type=(
                                         const.TEST_LAYOUT_IS_COMPLETE))
        self.assertTrue(flag, "layout of every directory is complete")
        g.log.info("verified: layouts correct")

        g.log.info("listing the copied directory")
        command = 'ls -A1 ' + m_point + '/' + destination_dir
        ret, out, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "failed to list the copied directory")

        g.log.info("copied directory listed")
        command = 'ls -A1 ' + m_point + '/root_dir'
        ret, out1, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "details of root_dir not listed")

        command = 'ls -A1 ' + m_point + '/' + destination_dir
        ret, out2, _ = g.run(client_ip, command)
        self.assertEqual(ret, 0, "details of copied dir not listed")
        self.assertEqual(out1, out2,
                         "contents and attributes of original and "
                         "copied directory not same")
        g.log.info("the contents and attributes of copied directory "
                   "are same")

        g.log.info("listing the copied directory on all the subvolumes")
        brick_list = get_all_bricks(self.mnode, self.volname)
        for brick in brick_list:

            brick_tuple = brick.partition(':')
            brick_path = brick_tuple[2]
            host_addr = brick_tuple[0]

            command = 'ls -A1 ' + brick_path + '/' + destination_dir
            ret, out, _ = g.run(host_addr, command)
            self.assertIsNotNone(out,
                                 ("copied directory not listed on brick "
                                  "%s", brick))

            g.log.info("copied directory listed on brick %s", brick)
            command = 'ls -l --time-style=\'+\' ' + brick_path \
                + '/root_dir/' + ' | grep ^d'
            ret, out1, _ = g.run(host_addr, command)
            self.assertEqual(ret, 0, "details of root_dir not listed on "
                                     "brick %s" % brick)

            command = 'ls -l --time-style=\'+\' ' + brick_path + '/' \
                + destination_dir + '| grep ^d'
            ret, out2, _ = g.run(host_addr, command)
            self.assertEqual(ret, 0, "details of copied dir not listed on "
                                     "brick %s" % brick)
            self.assertEqual(out1, out2,
                             "contents and attributes of original and "
                             "copied directory not same on brick "
                             "%s" % brick)
            g.log.info("the contents and attributes of copied directory "
                       "are same on brick %s", brick)
        g.log.info("the copied directory is present on all the subvolumes")
    def test_directory_custom_extended_attr(self):
        """Test - set custom xattr to directory and link to directory
        """
        # pylint: disable = too-many-statements
        dir_prefix = '{root}/folder_{client_index}'

        for mount_index, mount_point in enumerate(self.mounts):
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)

            # Create a directory from mount point
            g.log.info('Creating directory : %s:%s', mount_point.mountpoint,
                       folder_name)
            ret = mkdir(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Failed to create directory %s on mount point %s' %
                (folder_name, mount_point.mountpoint))

            ret = file_exists(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Created directory %s does not exist on mount '
                'point %s' % (folder_name, mount_point.mountpoint))
            g.log.info('Created directory %s:%s', mount_point.mountpoint,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIR)
            self.assertTrue(
                ret, "Expected - Directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display
            # xattr : trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is presented on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))
            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "presented on mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint, folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check for xattr trusted.glusterfs.pathinfo on %s:%s",
                        mount_point, folder_name)
            ret = get_fattr(mount_point.client_system,
                            mount_point.mountpoint,
                            'trusted.glusterfs.pathinfo',
                            encode="text")
            self.assertIsNotNone(
                ret, "trusted.glusterfs.pathinfo is not "
                "presented on %s:%s" % (mount_point.mountpoint, folder_name))
            g.log.info(
                'pathinfo xattr is displayed on mount point %s and '
                'dir %s', mount_point.mountpoint, folder_name)

            # Create a custom xattr for dir
            g.log.info("Set attribute user.foo to %s", folder_name)
            ret = set_fattr(mount_point.client_system, folder_name, 'user.foo',
                            'bar2')
            self.assertTrue(
                ret, "Setup custom attribute on %s:%s failed" %
                (mount_point.client_system, folder_name))

            g.log.info('Set custom attribute is set on %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check xattr user.foo on %s:%s',
                        mount_point.client_system, folder_name)
            ret = get_fattr(mount_point.client_system,
                            folder_name,
                            'user.foo',
                            encode="text")
            self.assertEqual(
                ret, 'bar2', "Xattr attribute user.foo is not presented on "
                "mount point %s and directory %s" %
                (mount_point.client_system, folder_name))

            g.log.info(
                'Custom xattr user.foo is presented on mount point'
                ' %s:%s ', mount_point.client_system, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)

                ret = get_fattr(brick_server,
                                brick_path,
                                'user.foo',
                                encode="text")

                g.log.debug('Check custom xattr for directory on brick %s:%s',
                            brick_server, brick_path)
                self.assertEqual(
                    'bar2', ret, "Expected: user.foo should be on brick %s\n"
                    "Actual: value of attribute user.foo is %s" %
                    (brick_path, ret))
                g.log.info('Custom xattr is presented on brick %s', brick_path)

            # Delete custom attribute
            ret = delete_fattr(mount_point.client_system, folder_name,
                               'user.foo')
            self.assertTrue(ret, "Failed to delete custom attribute")

            g.log.info('Removed custom attribute from directory %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks

            g.log.debug('Looking if custom extra attribute user.foo is '
                        'presented on mount or on bricks after deletion')
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          folder_name,
                          'user.foo',
                          encode="text"),
                "Xattr user.foo is presented on mount point"
                " %s:%s after deletion" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                "Xattr user.foo is not presented after deletion"
                " on mount point %s:%s", mount_point.mountpoint, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)
                self.assertIsNone(
                    get_fattr(brick_server, brick_path, 'user.foo'),
                    "Deleted xattr user.foo is presented on "
                    "brick %s:%s" % (brick, brick_path))
                g.log.info(
                    'Custom attribute is not presented after delete '
                    'from directory on brick %s:%s', brick, brick_path)

        # Repeat all of the steps for link of created directory
        for mount_index, mount_point in enumerate(self.mounts):
            linked_folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                                   client_index="%s_linked" %
                                                   mount_index)
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)
            # Create link to created dir
            command = 'ln -s {src} {dst}'.format(dst=linked_folder_name,
                                                 src=folder_name)
            ret, _, _ = g.run(mount_point.client_system, command)
            self.assertEqual(
                0, ret, 'Failed to create link %s to directory %s' %
                (linked_folder_name, folder_name))
            self.assertTrue(
                file_exists(mount_point.client_system, linked_folder_name),
                'Link does not exist on %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Created link %s to directory %s', linked_folder_name,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the link to dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_LINK)
            self.assertTrue(
                ret, "Expected - Link to directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display xattr :
            # trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, linked_folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trudted.gfid is presented on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "presented on mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint,
                linked_folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check if pathinfo is presented on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertIsNotNone(
                get_fattr(mount_point.client_system, mount_point.mountpoint,
                          'trusted.glusterfs.pathinfo'),
                "pathinfo is not displayed on mountpoint "
                "%s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info('pathinfo value is displayed on mount point %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Set custom Attribute to link
            g.log.debug("Set custom xattribute user.foo to %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                set_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo', 'bar2'))
            g.log.info('Successful in set custom attribute to %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check mountpoint and bricks for custom xattribute')
            self.assertEqual(
                'bar2',
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                'Custom xattribute is not presented on '
                'mount point %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattribute is presented on mount point %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists; g.run returns (ret, out, err),
                # so the shell's yes/no answer is in stdout
                _, out, _ = g.run(brick_server, cmd)
                if 'no' in out:
                    g.log.info("Link %s:%s does not exist", brick_server,
                               brick_path)
                    continue

                self.assertEqual(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"), 'bar2',
                    "Actual: custom attribute not "
                    "found on brick %s:%s" % (brick_server, brick_path))
                g.log.info('Custom xattr for link found on brick %s:%s',
                           brick_server, brick_path)

            # Delete custom attribute
            g.log.debug('Removing custom attribute on mount point %s:%s',
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                delete_fattr(mount_point.client_system, linked_folder_name,
                             'user.foo'), 'Fail on delete xattr user.foo')
            g.log.info('Deleted custom xattr from link %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks
            g.log.debug(
                "Check if custom xattr is presented on %s:%s "
                "after deletion", mount_point.client_system,
                linked_folder_name)
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                "Expected: xattr user.foo to be not presented on"
                " %s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr user.foo is not presented on %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists; again, stdout carries the answer
                _, out, _ = g.run(brick_server, cmd)
                if 'no' in out:
                    g.log.info("Link %s:%s does not exist", brick_server,
                               brick_path)
                    continue

                self.assertIsNone(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"),
                    "Extended custom attribute is presented on "
                    "%s:%s after deletion" % (brick_server, brick_path))
                g.log.info(
                    'Custom attribute is not presented after delete '
                    'from link on brick %s:%s', brick_server, brick_path)

        g.log.info('Directory custom extended attribute validation with '
                   'getfattr and setfattr is successful')
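
The set/verify/delete sequence for user.foo is repeated for the directory and for its link. A condensed sketch of that round trip, assuming the glustolibs xattr helpers live in glusterfile as these tests usually import them:

from glustolibs.gluster.glusterfile import (delete_fattr, get_fattr,
                                            set_fattr)


def xattr_roundtrip(host, path, key='user.foo', value='bar2'):
    """Set, read back, delete and re-check a custom xattr; True on success."""
    if not set_fattr(host, path, key, value):
        return False
    if get_fattr(host, path, key, encode="text") != value:
        return False
    if not delete_fattr(host, path, key):
        return False
    # After deletion the attribute must no longer be readable
    return get_fattr(host, path, key, encode="text") is None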
Code example #17
    def test_distribution_hash_value(self):
        """Test case tests DHT of files and directories based on hash value
        """
        # pylint: disable=too-many-locals
        for client_index, mount_obj in enumerate(self.mounts):
            client_host = mount_obj.client_system
            mountpoint = mount_obj.mountpoint

            # Create directory for initial data
            g.log.debug("Creating temporary folder on client's machine %s:%s",
                        client_host, self.temp_folder)
            if not mkdir(client_host, self.temp_folder):
                g.log.error("Failed create temporary directory "
                            "on client machine %s:%s",
                            client_host, self.temp_folder)
                raise ExecutionError("Failed create temporary directory "
                                     "on client machine %s:%s" %
                                     (client_host, self.temp_folder))
            g.log.info('Created temporary directory on client machine %s:%s',
                       client_host, self.temp_folder)
            # Prepare a set of data
            files = ["{prefix}{file_name}_{client_index}".
                     format(file_name=file_name,
                            client_index=client_index,
                            prefix='' if randint(1, 6) % 2
                            else choice('ABCD') + '/')
                     for file_name in map(chr, range(97, 123))]
            ret = self.create_files(client_host, self.temp_folder,
                                    files,
                                    "Lorem Ipsum is simply dummy text of the "
                                    "printing and typesetting industry.")
            self.assertTrue(ret, "Failed creating a set of files and dirs "
                                 "on %s:%s" % (client_host, self.temp_folder))
            g.log.info('Created data set on client machine on folder %s:%s',
                       client_host, self.temp_folder)

            # Copy prepared data to mount point
            cmd = ('cp -vr {source}/* {destination}'.format(
                source=self.temp_folder,
                destination=mountpoint))
            ret, _, _ = g.run(client_host, cmd)
            self.assertEqual(ret, 0, "Copy data to mount point %s:%s Failed")
            g.log.info('Copied prepared data to mount point %s:%s',
                       client_host, mountpoint)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying DHT layout")
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=TEST_LAYOUT_IS_COMPLETE)
            self.assertTrue(ret, "TEST_LAYOUT_IS_COMPLETE: FAILED")
            g.log.info("TEST_LAYOUT_IS_COMPLETE: PASS on %s:%s ",
                       client_host, mountpoint)

            g.log.debug("Verifying files and directories")
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "TEST_FILE_EXISTS_ON_HASHED_BRICKS: FAILED")
            g.log.info("TEST_FILE_EXISTS_ON_HASHED_BRICKS: PASS")

            # Verify "trusted.gfid" extended attribute of the
            # directory/file on all the bricks
            gfids = dict()
            g.log.debug("Check if trusted.gfid is presented on the bricks")
            for brick_item in get_all_bricks(self.mnode, self.volname):
                brick_host, brick_dir = brick_item.split(':')

                for target_destination in files:
                    if not file_exists(brick_host, '{brick_dir}/{dest}'.
                                       format(brick_dir=brick_dir,
                                              dest=target_destination)):
                        continue
                    ret = get_fattr(brick_host, '%s/%s' %
                                    (brick_dir, target_destination),
                                    'trusted.gfid')
                    self.assertIsNotNone(ret,
                                         "trusted.gfid is not presented "
                                         "on %s/%s" % (brick_dir,
                                                       target_destination))
                    g.log.info("Verified trusted.gfid on brick %s:%s",
                               brick_item, target_destination)
                    gfids.setdefault(target_destination, []).append(ret)

            g.log.debug('Check if trusted.gfid is same on all the bricks')
            self.assertTrue(all(len(set(gfids[k])) == 1 for k in gfids),
                            "trusted.gfid should be same on all the bricks")
            g.log.info('trusted.gfid is same on all the bricks')
            # Verify that mount point shows pathinfo xattr.
            g.log.debug("Check if pathinfo is presented on mount point "
                        "%s:%s", client_host, mountpoint)
            ret = get_fattr(client_host, mountpoint,
                            'trusted.glusterfs.pathinfo')
            self.assertIsNotNone(ret, "pathinfo is not presented on mount "
                                      "point %s:%s" % (client_host,
                                                       mountpoint))

            g.log.info('trusted.glusterfs.pathinfo is presented on mount'
                       ' point %s:%s', client_host, mountpoint)

            # Mount point should not display xattr:
            # trusted.gfid and trusted.glusterfs.dht
            g.log.debug("Check if trusted.gfid and trusted.glusterfs.dht are "
                        "not presented on mount point %s:%s", client_host,
                        mountpoint)
            attributes = get_fattr_list(client_host, mountpoint)
            self.assertFalse('trusted.gfid' in attributes,
                             "Expected: Mount point shouldn't display xattr:"
                             "{xattr}. Actual: xattrs {xattr} is "
                             "presented on mount point".
                             format(xattr='trusted.gfid'))
            self.assertFalse('trusted.glusterfs.dht' in attributes,
                             "Expected: Mount point shouldn't display xattr:"
                             "{xattr}. Actual: xattrs {xattr} is "
                             "presented on mount point".
                             format(xattr='trusted.glusterfs.dht'))

            g.log.info("trusted.gfid and trusted.glusterfs.dht are not "
                       "presented on mount point %s:%s", client_host,
                       mountpoint)
        g.log.info('Files and dirs are stored on bricks based on hash value')
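
The gfid consistency check above reduces a {path: [gfid, ...]} mapping to a boolean. The same predicate, stated directly as an illustrative helper:

def gfids_consistent(gfids):
    """True when every path maps to exactly one distinct gfid value."""
    return all(len(set(values)) == 1 for values in gfids.values())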
Code example #18
    def test_rebalance_with_hidden_files(self):
        # pylint: disable=too-many-statements
        # Start IO on mounts
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_files "
                   "--base-file-name . "
                   "-f 99 %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # Verify DHT values across mount points
        for mount_obj in self.mounts:
            g.log.debug("Verifying hash layout values %s:%s",
                        mount_obj.client_system, mount_obj.mountpoint)
            ret = validate_files_in_dir(mount_obj.client_system,
                                        mount_obj.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_FILES)
            self.assertTrue(
                ret, "Expected - Files are created on only "
                "sub-volume according to its hashed value")
            g.log.info("Hash layout values are verified %s:%s",
                       mount_obj.client_system, mount_obj.mountpoint)

        # Getting areequal checksum before rebalance
        g.log.info("Getting areequal checksum before rebalance")
        arequal_checksum_before_rebalance = collect_mounts_arequal(self.mounts)

        # Log Volume Info and Status before expanding the volume.
        g.log.info("Logging volume info and Status before expanding volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Expanding volume by adding bricks to the volume
        g.log.info("Start adding bricks to volume")
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
        g.log.info("Expanding volume is successful on "
                   "volume %s", self.volname)

        # Wait for gluster processes to come online
        g.log.info("Wait for gluster processes to come online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online", self.volname))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.volname)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Volume %s : All process are not online ", self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Start Rebalance
        g.log.info("Starting Rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Rebalance is successfully complete on the volume %s",
                   self.volname)

        # Checking if there are any migration failures
        status = get_rebalance_status(self.mnode, self.volname)
        for each_node in status['node']:
            failed_files_count = int(each_node['failures'])
            self.assertEqual(
                failed_files_count, 0,
                "Rebalance failed to migrate few files on %s" %
                each_node['nodeName'])
            g.log.info("There are no migration failures")

        # Getting areequal checksum after rebalance
        g.log.info("Getting areequal checksum after rebalance")
        arequal_checksum_after_rebalance = collect_mounts_arequal(self.mounts)

        # Comparing arequals checksum before and after rebalance
        g.log.info("Comparing arequals checksum before and after rebalance")
        self.assertEqual(arequal_checksum_before_rebalance,
                         arequal_checksum_after_rebalance,
                         "arequal checksum is NOT MATCHING")
        g.log.info("arequal checksum is SAME")
Code example #19
    def test_create_link_for_directory(self):

        g.log.info("creating a directory at mount point")
        m_point = self.mounts[0].mountpoint
        test_dir_path = 'test_dir'
        fqpath = m_point + '/' + test_dir_path
        flag = mkdir(self.clients[0], fqpath, True)
        self.assertTrue(flag, "failed to create a directory")
        fqpath = m_point + '/' + test_dir_path + '/dir{1..3}'
        flag = mkdir(self.clients[0], fqpath, True)
        self.assertTrue(flag, "failed to create sub directories")
        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/test_dir',
                                     test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(flag, "layout of test directory is complete")
        g.log.info("directory created successfully")

        g.log.info("creating a symlink for test_dir")
        sym_link_path = m_point + '/' + 'test_sym_link'
        command = 'ln -s ' + m_point + '/test_dir ' + sym_link_path
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to create symlink for test_dir")

        command = 'stat ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "stat command didn't return the details "
                         "correctly")
        flag = False
        g.log.info("checking if the link is symbolic")
        if 'symbolic link' in out:
            flag = True
        self.assertTrue(flag, "the type of the link is not symbolic")
        g.log.info("the link is symbolic")
        g.log.info("checking if the sym link points to right directory")
        index_start = out.find('->') + 6
        index_end = out.find("\n") - 3
        dir_pointed = out[index_start:index_end]
        flag = False
        if dir_pointed == m_point + '/' + test_dir_path:
            flag = True
        self.assertTrue(flag, "sym link does not point to correct " "location")
        g.log.info("sym link points to right directory")
        g.log.info("The details of the symlink are correct")

        g.log.info("verifying that inode number of the test_dir "
                   "and its sym link are different")
        command = 'ls -id ' + m_point + '/' + \
            test_dir_path + ' ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "inode numbers not retrieved by the "
                         "ls command")
        list_of_inode_numbers = out.split('\n')
        flag = True
        if (list_of_inode_numbers[0].split(' ')[0] ==
                list_of_inode_numbers[1].split(' ')[0]):
            flag = False
        self.assertTrue(
            flag, "the inode numbers of the dir and sym link "
            "are same")
        g.log.info("verified: inode numbers of the test_dir "
                   "and its sym link are different")

        g.log.info("listing the contents of the test_dir from its sym " "link")
        command = 'ls ' + sym_link_path
        ret, out1, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to list the contents using the "
                         "sym link")
        command = 'ls ' + m_point + '/' + test_dir_path
        ret, out2, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(
            ret, 0, "failed to list the contents of the "
            "test_dir using ls command")
        flag = False
        if out1 == out2:
            flag = True
        self.assertTrue(
            flag, "the contents listed using the sym link "
            "are not the same")
        g.log.info("the contents listed using the symlink are"
                   " the same as that of the test_dir")

        g.log.info("verifying that mount point doesn't display important "
                   "xattrs using the symlink")
        command = 'getfattr -d -m . -e hex ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to retrieve xattrs")
        list_xattrs = ['trusted.gfid', 'trusted.glusterfs.dht']
        flag = True
        for xattr in list_xattrs:
            if xattr in out:
                flag = False
        self.assertTrue(
            flag, "important xattrs are being compromised"
            " using the symlink at the mount point")
        g.log.info("verified: mount point doesn't display important "
                   "xattrs using the symlink")

        g.log.info("verifying that mount point shows path info xattr for the"
                   " test_dir and sym link and is same for both")
        path_info_1 = get_pathinfo(self.mounts[0].client_system,
                                   m_point + '/' + test_dir_path)
        path_info_2 = get_pathinfo(self.mounts[0].client_system, sym_link_path)
        flag = False
        if path_info_1 == path_info_2:
            flag = True
        self.assertTrue(
            flag, "pathinfo for test_dir and its sym link "
            "are not the same")
        g.log.info("pathinfos for test_dir and its sym link are same")

        g.log.info("verifying readlink on sym link at mount point returns "
                   "the name of the directory")
        command = 'readlink ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "readlink command returned an error")
        flag = False
        if out.rstrip() == m_point + '/' + test_dir_path:
            flag = True
        self.assertTrue(flag, "readlink did not return the path of the "
                        "test_dir")
        g.log.info("readlink successfully returned the path of the test_dir")
Code example #20
    def test_volume_start_stop_while_rebalance_is_in_progress(self):
        # DHT Layout and hash validation
        for mount_obj in self.mounts:
            g.log.debug("Verifying hash layout values %s:%s",
                        mount_obj.client_system, mount_obj.mountpoint)
            ret = validate_files_in_dir(mount_obj.client_system,
                                        mount_obj.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_FILES |
                                        FILETYPE_DIRS)
            self.assertTrue(ret, "Hash Layout Values: Fail")
            g.log.info("Hash layout values are verified %s:%s",
                       mount_obj.client_system, mount_obj.mountpoint)

        # Log Volume Info and Status before expanding the volume.
        g.log.info("Logging volume info and Status before expanding volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        g.log.error(ret, "Logging volume info and status failed on "
                         "volume %s", self.volname)
        g.log.info("Logging volume info and status was successful for volume "
                   "%s", self.volname)

        # Expanding volume by adding bricks to the volume
        g.log.info("Start adding bricks to volume")
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info,)
        self.assertTrue(ret, ("Failed to expand the volume on volume %s ",
                              self.volname))
        g.log.info("Expanding volume is successful on volume %s", self.volname)

        # Wait for gluster processes to come online
        g.log.info("Wait for gluster processes to come online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online", self.volname))
        g.log.info("Successful in waiting for volume %s processes to be "
                   "online", self.volname)

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Error: Volume processes failed to come up for "
                              "%s", self.volname))
        g.log.info("All processes are up for volume %s", self.volname)

        # Wait for gluster processes to come online
        g.log.info("Wait for gluster processes to come online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Error: Volume processes failed to come up for "
                              "%s", self.volname))
        g.log.info("All processes are up for volume %s", self.volname)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode,
                                                      self.volname)
        self.assertTrue(ret, ("Volume %s : All process are not online ",
                              self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Start Rebalance
        g.log.info("Starting rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s ",
                   self.volname)

        # Logging rebalance status
        g.log.info("Logging rebalance status")
        status_info = get_rebalance_status(self.mnode, self.volname)
        status = status_info['aggregate']['statusStr']

        self.assertIn('in progress', status,
                      "Rebalance process is not running")
        g.log.info("Rebalance process is running")

        ret, out, err = volume_stop(self.mnode, self.volname)
        g.log.debug("Rebalance info: %s", out)

        self.assertIn("rebalance session is in progress", err, " Volume "
                      "stopped successfully while rebalance session is in "
                      "progress")
        g.log.info("Volume stop failed as rebalance session is in "
                   "progress")

        # Check volume info to check the status of volume
        g.log.info("Checking volume info for the volume status")
        status_info = get_volume_info(self.mnode, self.volname)
        status = status_info[self.volname]['statusStr']
        self.assertIn('Started', status, ("Volume %s state is \"Stopped\"",
                                          self.volname))
        g.log.info("Volume %s state is \"Started\"", self.volname)
Code example #21
    def test_expanding_volume_when_io_in_progress(self):
        # pylint: disable=too-many-statements
        # Log Volume Info and Status before expanding the volume.
        g.log.info("Logging volume info and Status before expanding volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Expanding volume by adding bricks to the volume when IO in progress
        g.log.info("Start adding bricks to volume when IO in progress")
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to expand the volume while IO in "
                              "progress on volume %s", self.volname))
        g.log.info(
            "Expanding volume while IO in progress on "
            "volume %s : Success", self.volname)

        # Wait for gluster processes to come online
        g.log.info("Wait for gluster processes to come online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online", self.volname))
        g.log.info("Waiting for volume %s process to be online", self.volname)

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Volume %s : All process are not online", self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Start Rebalance
        g.log.info("Starting Rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                  "%s", self.volname))
        g.log.info("Started rebalance on the volume %s: Success", self.volname)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode,
                                             self.volname,
                                             timeout=1800)
        self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Rebalance status on volume %s: Complete", self.volname)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for the "
                                  "volume %s", self.volname))
        g.log.info("Rebalance status on volume %s: Complete", self.volname)

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO on all mounts: Complete")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("List all files and directories: Success")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Checking if there are any migration failures
        status = get_rebalance_status(self.mnode, self.volname)
        for each_node in status['node']:
            self.assertEqual(
                0, int(each_node['failures']),
                "Rebalance failed to migrate few files on %s" %
                each_node['nodeName'])
            g.log.info("No migration failures on %s", each_node['nodeName'])
Code example #22
    def test_create_directory(self):

        g.log.info("creating multiple,multilevel directories")
        m_point = self.mounts[0].mountpoint
        command = 'mkdir -p ' + m_point + '/root_dir/test_dir{1..3}'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(
            ret, 0,
            "directory creation failed on %s" % self.mounts[0].mountpoint)
        command = 'ls ' + m_point + '/root_dir'
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "ls failed on parent directory:root_dir")
        g.log.info("ls on parent directory: successful")

        g.log.info("creating files at different directory levels inside %s",
                   self.mounts[0].mountpoint)
        command = 'touch ' + m_point + \
            '/root_dir/test_file{1..5} ' + m_point + \
            '/root_dir/test_dir{1..3}/test_file{1..5}'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation: failed")
        command = 'ls ' + m_point + '/root_dir'
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "can't list the created directories")
        list_of_files_and_dirs = out.split('\n')
        flag = True
        for x_count in range(3):
            dir_name = 'test_dir%d' % (x_count + 1)
            if dir_name not in list_of_files_and_dirs:
                flag = False
        for x_count in range(5):
            file_name = 'test_file%d' % (x_count + 1)
            if file_name not in list_of_files_and_dirs:
                flag = False
        self.assertTrue(
            flag, "ls command didn't list all the "
            "directories and files")
        g.log.info("creation of files at multiple levels successful")

        g.log.info("creating a list of all directories")
        command = 'cd ' + m_point + ';find root_dir -type d -print'
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "creation of directory list failed")
        list_of_all_dirs = out.split('\n')
        del list_of_all_dirs[-1]

        g.log.info("verifying that all the directories are present on "
                   "every brick and the layout ranges are correct")
        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/root_dir',
                                     test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(flag, "Layout has some holes or overlaps")
        g.log.info("Layout is completely set")

        g.log.info("Checking if gfid xattr of directories is displayed and"
                   "is same on all the bricks on the server node")
        brick_list = get_all_bricks(self.mnode, self.volname)
        for direc in list_of_all_dirs:
            list_of_gfid = []
            for brick in brick_list:
                # partition(':') returns a 3-tuple: the host address,
                # the ':' separator itself, and the brick path
                brick_tuple = brick.partition(':')
                brick_path = brick_tuple[2]
                gfid = get_fattr(brick_tuple[0], brick_path + '/' + direc,
                                 'trusted.gfid')
                list_of_gfid.append(gfid)
            flag = True
            for x_count in range(len(list_of_gfid) - 1):
                if list_of_gfid[x_count] != list_of_gfid[x_count + 1]:
                    flag = False
            self.assertTrue(flag, ("the gfid for the directory %s is not "
                                   "same on all the bricks", direc))
        g.log.info("the gfid for each directory is the same on all the "
                   "bricks")

        g.log.info("Verify that for all directories mount point "
                   "should not display xattr")
        for direc in list_of_all_dirs:
            list_of_xattrs = get_fattr_list(
                self.mounts[0].client_system,
                self.mounts[0].mountpoint + '/' + direc)
            if 'security.selinux' in list_of_xattrs:
                del list_of_xattrs['security.selinux']
            self.assertFalse(
                list_of_xattrs, "one or more xattr being "
                "displayed on mount point")
        g.log.info("Verified : mount point not displaying important " "xattrs")

        g.log.info("Verifying that for all directories only mount point "
                   "shows pathinfo xattr")
        for direc in list_of_all_dirs:
            fattr = get_fattr(self.mounts[0].client_system,
                              self.mounts[0].mountpoint + '/' + direc,
                              'trusted.glusterfs.pathinfo')
            self.assertTrue(fattr, ("pathinfo not displayed for the "
                                    "directory %s on mount point", direc))
        brick_list = get_all_bricks(self.mnode, self.volname)
        for direc in list_of_all_dirs:
            for brick in brick_list:
                host = brick.partition(':')[0]
                brick_path = brick.partition(':')[2]
                fattr = get_fattr(host, brick_path + '/' + direc,
                                  'trusted.glusterfs.pathinfo')
                self.assertIsNone(fattr, "subvolume displaying pathinfo")
        g.log.info("Verified: only mount point showing pathinfo "
                   "for all the directories")
Code example #23
    def test_rename_directory_no_destination_folder(self):
        """Test rename directory with no destination folder"""
        dirs = {
            'initial': '{root}/folder_{client_index}',
            'new_folder': '{root}/folder_renamed{client_index}'
        }

        for mount_index, mount_obj in enumerate(self.mounts):
            client_host = mount_obj.client_system
            mountpoint = mount_obj.mountpoint
            initial_folder = dirs['initial'].format(
                root=mount_obj.mountpoint,
                client_index=mount_index
            )

            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=LAYOUT_IS_COMPLETE,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "Expected - Layout is complete")
            g.log.info('Layout is complete')

            # Create source folder on mount point
            self.assertTrue(mkdir(client_host, initial_folder),
                            'Failed creating source directory')
            self.assertTrue(file_exists(client_host, initial_folder))
            g.log.info('Created source directory %s on mount point %s',
                       initial_folder, mountpoint)

            # Create files and directories
            ret = self.create_files(client_host, initial_folder, self.files,
                                    content='Textual content')

            self.assertTrue(ret, 'Unable to create files on mount point')
            g.log.info('Files and directories are created')

            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS)
            self.assertTrue(ret, "Expected - Files and dirs are stored "
                            "on hashed bricks")
            g.log.info('Files and dirs are stored on hashed bricks')

            new_folder_name = dirs['new_folder'].format(
                root=mountpoint,
                client_index=mount_index
            )
            # Check if destination dir does not exist
            self.assertFalse(file_exists(client_host, new_folder_name),
                             'Expected new folder name to not exist yet')
            # Rename source folder
            ret = move_file(client_host, initial_folder,
                            new_folder_name)
            self.assertTrue(ret, "Rename direcoty failed")
            g.log.info('Renamed directory %s to %s', initial_folder,
                       new_folder_name)

            # Old dir does not exists and destination is presented
            self.assertFalse(file_exists(client_host, initial_folder),
                             '%s should not be listed' % initial_folder)
            g.log.info('The old directory %s does not exist on mount point',
                       initial_folder)
            self.assertTrue(file_exists(client_host, new_folder_name),
                            'Destination dir %s does not exist' %
                            new_folder_name)
            g.log.info('The new folder %s is present', new_folder_name)

            # Check bricks for source and destination directories
            for brick_item in get_all_bricks(self.mnode, self.volname):
                brick_host, _, brick_dir = brick_item.partition(':')

                initial_folder = dirs['initial'].format(
                    root=brick_dir,
                    client_index=mount_index
                )
                new_folder_name = dirs['new_folder'].format(
                    root=brick_dir,
                    client_index=mount_index
                )

                self.assertFalse(file_exists(brick_host, initial_folder),
                                 "Expected directory %s to be absent" %
                                 initial_folder)
                self.assertTrue(file_exists(brick_host, new_folder_name),
                                'Expected directory %s to be present' %
                                new_folder_name)

                g.log.info('The old directory %s is absent and directory '
                           '%s is present on the brick', initial_folder,
                           new_folder_name)
        g.log.info('Renaming a directory succeeds when the destination '
                   'directory does not exist')
Code Example #24
    def test_induce_holes_then_lookup(self):
        """
        Test Script to induce holes in layout by using remove-brick force
        and then performing lookup in order to fix the layout.

        Steps :
        1) Create a volume and mount it using FUSE.
        2) Create a directory "testdir" on mount point.
        3) Check if the layout is complete.
        4) Log volume info and status before remove-brick operation.
        5) Form a list of bricks to be removed.
        6) Start remove-brick operation using 'force'.
        7) Let remove-brick complete and check layout.
        8) Mount the volume on a new mount.
        9) Send a lookup on mount point.
        10) Check if the layout is complete.

        """
        # pylint: disable=too-many-statements
        # Create a directory on mount point
        m_point = self.mounts[0].mountpoint
        dirpath = '/testdir'
        command = 'mkdir -p ' + m_point + dirpath
        ret, _, _ = g.run(self.clients[0], command)
        self.assertEqual(ret, 0, "mkdir failed")
        g.log.info("mkdir is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    m_point)
        ret = validate_files_in_dir(self.clients[0],
                                    m_point,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before shrinking the volume.
        g.log.info("Logging volume info and Status before shrinking volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Form bricks list for Shrinking volume
        self.remove_brick_list = form_bricks_list_to_remove_brick(self.mnode,
                                                                  self.volname,
                                                                  subvol_num=1)
        self.assertIsNotNone(self.remove_brick_list,
                             "Volume %s: Failed to form bricks list for "
                             "volume shrink" % self.volname)
        g.log.info("Volume %s: Formed bricks list for volume shrink",
                   self.volname)

        # Shrinking volume by removing bricks
        g.log.info("Start removing bricks from volume")
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 self.remove_brick_list, "force")
        self.assertFalse(ret, "Remove-brick with force: FAIL")
        g.log.info("Remove-brick with force: PASS")

        # Check the layout
        ret = is_layout_complete(self.mnode, self.volname, dirpath)
        self.assertFalse(ret, ("Volume %s: Layout is complete", self.volname))
        g.log.info("Volume %s: Layout has some holes", self.volname)

        # Mount the volume again on a second client for a fresh mount
        ret, _, _ = mount_volume(self.volname,
                                 mtype='glusterfs',
                                 mpoint=m_point,
                                 mserver=self.mnode,
                                 mclient=self.clients[1])
        self.assertEqual(ret, 0,
                         "Failed to do gluster mount of volume %s on client "
                         "node %s" % (self.volname, self.clients[1]))
        g.log.info("Volume %s mounted successfully on %s", self.volname,
                   self.clients[1])

        # Send a look up on the directory
        cmd = 'ls %s%s' % (m_point, dirpath)
        ret, _, err = g.run(self.clients[1], cmd)
        self.assertEqual(ret, 0,
                         "Lookup failed on %s with error %s" % (dirpath, err))
        g.log.info("Lookup sent successfully on %s", m_point + dirpath)

        # DHT Layout validation
        g.log.info("Checking layout after new mount")
        g.log.debug("Verifying hash layout values %s:%s", self.clients[1],
                    m_point + dirpath)
        ret = validate_files_in_dir(self.clients[1],
                                    m_point + dirpath,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")