def test_file_rename_when_source_and_dest_hash_same_subvol(self):
        """
        Case 2:
        - Destination file does not exist
        - Source file is stored on hashed subvolume(s1) itself
        - Destination file should be hashed to same subvolume(s1)
            mv <source_file> <destination_file>
        - Source file should be renamed to destination file
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create source file and get hashed subvol (s1)
        source_hashed_subvol, count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a destination file name that hashes to the same subvol (s1)
        new_hashed = find_specific_hashed(self.subvols, "/",
                                          source_hashed_subvol)
        self.assertIsNotNone(new_hashed,
                             "could not find new hashed for destination file")

        # Rename the source file to the destination file
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(
            ret,
            "Failed to move files {} and {}".format(source_file, dest_file))

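        # Verify the renamed file hashes to the same subvol (s1) as the source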
        _, rename_count = find_hashed_subvol(self.subvols, "/",
                                             str(new_hashed.newname))
        self.assertEqual(count, rename_count,
                         "The hashed subvols for src and dest are not same.")
    def _create_file_and_get_hashed_subvol(self, file_name):
        """ Creates a file and return its hashed subvol

        Args:
               file_name(str): name of the file to be created
        Returns:
                hashed_subvol object: An object of BrickDir type
                                      representing the hashed subvolume

                subvol_count: The subvol index in the subvol list

                source_file: Path to the file created

        """
        # pylint: disable=unsubscriptable-object

        # Create Source File
        source_file = "{}/{}".format(self.mount_point, file_name)
        ret, _, err = g.run(self.mounts[0].client_system,
                            ("touch %s" % source_file))
        self.assertEqual(
            ret, 0, ("Failed to create {} : err {}".format(source_file, err)))
        g.log.info("Successfully created the source file")

        # Find the hashed subvol for source file
        source_hashed_subvol, count = find_hashed_subvol(
            self.subvols, "/", file_name)
        self.assertIsNotNone(
            source_hashed_subvol,
            "Couldn't find hashed subvol for the source file")
        return source_hashed_subvol, count, source_file
    def _check_dirty_xattr(self, filename):
        """Get trusted.ec.dirty xattr value to validate eagerlock behavior"""
        # Find the hashed subvol of the file created
        # for distributed disperse case
        subvols_info = get_subvols(self.mnode, self.volname)
        subvols_info = subvols_info['volume_subvols']
        if len(subvols_info) > 1:
            _, hashed_subvol = find_hashed_subvol(subvols_info, '', filename)
            if hashed_subvol is None:
                g.log.error("Error in finding hash value of %s", filename)
                return None
        else:
            hashed_subvol = 0

        # Collect ec.dirty xattr value from each brick
        result = []
        for subvol in subvols_info[hashed_subvol]:
            host, brickpath = subvol.split(':')
            brickpath = brickpath + '/' + filename
            ret = get_extended_attributes_info(host, [brickpath],
                                               encoding='hex',
                                               attr_name='trusted.ec.dirty')
            ret = ret[brickpath]['trusted.ec.dirty']
            result.append(ret)

        # Check if xattr values are same across all bricks
        if result.count(result[0]) == len(result):
            return ret
        g.log.error(
            "trusted.ec.dirty value is not consistent across the "
            "disperse set %s", result)
        return None
    def _find_cache_for_src_file(self):
        """Find out the hashed subvol for srcfile, which after rename will
        become the cached subvol"""
        src_cache_subvol, src_cache_subvol_count = find_hashed_subvol(
            self.subvols, "/", "srcfile")
        self.assertIsNotNone(src_cache_subvol,
                             "Could not find cached subvol for srcfile")
        g.log.info("Cached subvol for srcfile is %s", src_cache_subvol._path)
        return src_cache_subvol_count
    def test_dht_file_rename_dest_exists_src_and_dest_hash_same(self):
        """
        Case 7:
        - Destination file should exist
        - Source file is stored on hashed subvolume(s1) itself
        - Destination file should be hashed to same subvolume(s1)
        - Destination file is stored on hashed subvolume
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed to destination file
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create source file and get hashed subvol (s1)
        source_hashed_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for destination file that hashes
        # to same subvol (s1)
        new_hashed = find_specific_hashed(self.subvols, "/",
                                          source_hashed_subvol)
        self.assertIsNotNone(
            new_hashed, "Couldn't find a new hashed subvol "
            "for destination file")

        # Create destination_file and get its hashed subvol (should be s1)
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))

        # Verify the subvols are the same for source and destination files
        self.assertEqual(src_count, dest_count,
                         "The subvols for src and dest are not same.")

        # Rename the source file to the destination file
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move files {} and {}".format(source_file, dest_file)))

        # Verify the file move and the destination file is hashed to
        # same subvol or not
        _, rename_count = find_hashed_subvol(self.subvols, "/",
                                             str(new_hashed.newname))
        self.assertEqual(dest_count, rename_count,
                         ("The subvols for source : {} and dest : {} are "
                          "not same.".format(source_hashed_subvol._fqpath,
                                             dest_hashed_subvol._fqpath)))

        # Verify destination file is removed
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed.newname))
        self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
                              " : {}".format(str(new_hashed.newname),
                                             dest_hashed_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")
    def mkdir_before_hashdown(self, subvols, parent_dir):
        '''
        case -2:
            - create directory
            - bring down hashed subvol
            - make sure stat is successful on the dir
        '''
        # pylint: disable=protected-access
        # pylint: disable=pointless-string-statement
        # create parent dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        if ret != 0:
            g.log.error('mkdir failed for %s err: %s', parent_dir, err)
            return False
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # find hashed subvol
        hashed_subvol, count = find_hashed_subvol(subvols, "/", "parent")
        if hashed_subvol is None:
            g.log.error('Error in finding hash value')
            return False

        g.log.info("hashed subvol %s", hashed_subvol._host)

        # bring hashed_subvol offline
        ret = bring_bricks_offline(self.volname, subvols[count])
        if not ret:
            g.log.error('Error in bringing down subvolume %s', subvols[count])
            return False
        g.log.info('target subvol %s is offline', subvols[count])

        # this confirms both layout and stat of the directory
        ret = validate_files_in_dir(self.clients[0],
                                    parent_dir,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "Layout is not complete")
        g.log.info('Layout is complete')

        # bring up the subvol
        ret = bring_bricks_online(self.mnode,
                                  self.volname,
                                  subvols[count],
                                  bring_bricks_online_methods=None)
        if not ret:
            g.log.error("Error in bringing back subvol online")
            return False
        g.log.info('Subvol is back online')

        # delete parent_dir
        ret, _, err = g.run(self.clients[0], ("rmdir %s" % parent_dir))
        if ret != 0:
            g.log.error('rmdir failed for %s err: %s', parent_dir, err)
            return False
        g.log.info("rmdir of directory %s successful", parent_dir)
        return True
    def test_brick_full_add_brick_remove_brick(self):
        """
        Test case:
        1. Create a volume, start it and mount it.
        2. Fill few bricks till min-free-limit is reached.
        3. Add brick to the volume.(This should pass.)
        4. Set cluster.min-free-disk to 30%.
        5. Remove bricks from the volume.(This should pass.)
        6. Check for data loss by comparing arequal before and after.
        """
        # Fill a few bricks till min-free-limit is reached
        bricks = get_all_bricks(self.mnode, self.volname)

        # Calculate the usable size and fill till it reaches
        # min free limit
        usable_size = get_usable_size_per_disk(bricks[0])
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        filename = "abc"
        for _ in range(0, usable_size):
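            # Regenerate the file name until it no longer hashes to subvols[0]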
            while (subvols[find_hashed_subvol(subvols, "/",
                                              filename)[1]] == subvols[0]):
                filename = self._get_random_string()
            ret, _, _ = g.run(
                self.mounts[0].client_system,
                "fallocate -l 1G {}/{}".format(self.mounts[0].mountpoint,
                                               filename))
            self.assertFalse(ret, "Failed to fill disk to min free limit")
            filename = self._get_random_string()
        g.log.info("Disk filled up to min free limit")

        # Collect arequal checksum before ops
        arequal_checksum_before = collect_mounts_arequal(self.mounts[0])

        # Add brick to volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to add brick on volume %s" % self.volname)

        # Set cluster.min-free-disk to 30%
        ret = set_volume_options(self.mnode, self.volname,
                                 {'cluster.min-free-disk': '30%'})
        self.assertTrue(ret, "Failed to set cluster.min-free-disk to 30%")

        # Remove bricks from the volume
        ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=1800)
        self.assertTrue(ret, "Failed to remove-brick from volume")
        g.log.info("Remove-brick rebalance successful")

        # Check for data loss by comparing arequal before and after ops
        arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
        self.assertEqual(arequal_checksum_before, arequal_checksum_after,
                         "arequal checksum is NOT MATCHNG")
        g.log.info("arequal checksum is SAME")
    def test_remove_brick_with_open_fd(self):
        """
        Test case:
        1. Create volume, start it and mount it.
        2. Open file datafile on mount point and start copying /etc/passwd
           line by line(Make sure that the copy is slow).
        3. Start remove-brick of the subvol to which datafile is hashed.
        4. Once remove-brick is complete compare the checksum of /etc/passwd
           and datafile.
        """
        # Open file datafile on mount point and start copying /etc/passwd
        # line by line
        ret, out, _ = g.run(self.mounts[0].client_system,
                            "cat /etc/passwd | wc -l")
        self.assertFalse(ret, "Failed to get number of lines of /etc/passwd")
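        # Keep fd 30 open on datafile and append one line of /etc/passwd
        # every 10 seconds so the copy stays slow while remove-brick runs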
        cmd = ("cd {}; exec 30<> datafile ;for i in `seq 1 {}`; do "
               "head -n $i /etc/passwd | tail -n 1 >> datafile; sleep 10; done"
               .format(self.mounts[0].mountpoint, out.strip()))

        self.list_of_io_processes = [
            g.run_async(self.mounts[0].client_system, cmd)
        ]
        self.is_copy_running = True

        # Start remove-brick of the subvol to which datafile is hashed
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        number = find_hashed_subvol(subvols, "/", 'datafile')[1]

        ret = shrink_volume(self.mnode, self.volname, subvol_num=number)
        self.assertTrue(ret, "Failed to remove-brick from volume")
        g.log.info("Remove-brick rebalance successful")

        # Validate if I/O was successful or not.
        ret = validate_io_procs(self.list_of_io_processes, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        self.is_copy_running = False

        # Compare md5checksum of /etc/passwd and datafile
        md5_of_orginal_file = get_md5sum(self.mounts[0].client_system,
                                         '/etc/passwd')
        self.assertIsNotNone(md5_of_orginal_file,
                             'Unable to get md5 checksum of original file')
        md5_of_copied_file = get_md5sum(
            self.mounts[0].client_system,
            '{}/datafile'.format(self.mounts[0].mountpoint))
        self.assertIsNotNone(md5_of_copied_file,
                             'Unable to get md5 checksum of copied file')
        self.assertEqual(
            md5_of_orginal_file.split(" ")[0],
            md5_of_copied_file.split(" ")[0],
            "md5 checksum of original and copied file didn't"
            " match")
        g.log.info("md5 checksum of original and copied files are same")
    def _get_hashed_subvol_index(self, subvols):
        """
        Return `index` of hashed_volume from list of subvols
        """
        index = 0
        if self.volume_type.find('distributed') >= 0:
            hashed_subvol, index = find_hashed_subvol(
                subvols, '',
                self.file_path.rsplit('/', 1)[1])
            self.assertIsNotNone(hashed_subvol,
                                 'Unable to find hashed subvolume')
        return index
    def test_custom_xattr_with_subvol_down_dir_doesnt_exists(self):
        """
        Description:
        Steps:
        1) Bring one or more(not all) dht sub-volume(s) down by killing
           processes on that server
        2) Create a directory from mount point such that it
           hashes to up subvol.
        3) Create a custom xattr for dir
           # setfattr -n user.foo -v bar2 <dir>
        4) Verify that custom xattr for directory is displayed on mount point
           and bricks for directory
           # getfattr -n user.foo <dir>
           # getfattr -n user.foo <brick_path>/<dir>
        5) Modify custom xattr value and verify that custom xattr for directory
           is displayed on mount point and all up bricks
           # setfattr -n user.foo -v ABC <dir>
        6) Verify that custom xattr is not displayed once you remove it on
           mount point and all up bricks
        7) Verify that mount point shows pathinfo xattr for dir
        8) Again create a custom xattr for dir
           # setfattr -n user.foo -v star1 <dir>
        9) Bring up the sub-volumes
        10) Execute lookup on parent directory of both <dir> from mount point
        11) Verify Custom extended attributes for dir1 on all bricks
        """
        # Get subvol list
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "Failed to get subvols")

        # Find out the hashed subvol for dir1
        hashed_subvol, subvol_count = find_hashed_subvol(subvols, "/", "dir1")
        self.assertIsNotNone(hashed_subvol,
                             "Could not find hashed subvol for dir1")
        g.log.info("Hashed subvol for dir1 is %s", hashed_subvol._path)

        # Remove the hashed_subvol from subvol list
        subvols.remove(subvols[subvol_count])

        # Bring down a dht subvol
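        # (dir1's hashed subvol was removed from the list above, so the
        # subvol taken down here is not the one dir1 hashes to)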
        ret = bring_bricks_offline(self.volname, subvols[0])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[0]))
        g.log.info('DHT subvol %s is offline', subvols[0])

        # Create the dir1
        self._create_dir(dir_name="dir1")

        # Create custom xattr on dir1 and run the self-heal checks
        self._create_xattr_check_self_heal()
    def test_copy_new_dir_dest_subvol_up(self):
        """
        Case 3:
        - Copy dir ---> Bring down dht sub-volume where destination
          directory should not hash to down sub-volume
        - Create files and directories from mount point.
        - copy dir and make sure destination dir does not exist
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        m_point = self.mounts[0].mountpoint

        # Get subvols
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "Failed to get subvols")

        # Find out hashed brick/subvol for src dir
        src_subvol, src_count = find_hashed_subvol(subvols, "/", "src_dir")
        self.assertIsNotNone(src_subvol,
                             "Could not find hashed subvol for src_dir")
        g.log.info("Hashed subvol for src_dir is %s", src_subvol._path)

        # Find out the destination dir name such that it hashes to
        # different subvol
        newdir = find_new_hashed(subvols, "/", "src_dir")
        dest_dir = str(newdir.newname)
        dest_count = newdir.subvol_count

        # Remove the hashed subvol for dest and src dir from the
        # subvol list
        for item in (subvols[src_count], subvols[dest_count]):
            subvols.remove(item)

        # Bring down a dht subvol
        ret = bring_bricks_offline(self.volname, subvols[0])
        self.assertTrue(ret,
                        ('Error in bringing down subvolume %s', subvols[0]))
        g.log.info('DHT subvol %s is offline', subvols[0])

        # Create source dir and create files inside it
        self._create_src(m_point)

        # Create files on source dir and
        # perform copy of src_dir to dest_dir
        self._copy_files_check_contents(m_point, dest_dir)
    def _filecreate_and_hashcheck(self, timeoutval):
        """Create a file and check on which subvol it is hashed to"""
        # Create and write to a file to test the eagerlock timeout behavior
        objectname = 'EagerLockTimeoutCheck-file-' + timeoutval
        objectpath = ("{}/{}".format(self.mounts[0].mountpoint, objectname))
        ret = append_string_to_file(self.mounts[0].client_system, objectpath,
                                    'EagerLockTest')
        self.assertTrue(ret, 'create and append of %s failed' % objectname)
        ret = get_subvols(self.mnode, self.volname)
        # Find the hashed subvol of the file created
        if len(ret['volume_subvols']) > 1:
            _, hashed_subvol = find_hashed_subvol(ret['volume_subvols'], '',
                                                  objectname)
            if hashed_subvol is None:
                g.log.error("Error in finding hash value of %s", objectname)
                return None
            return (objectname, ret['volume_subvols'], hashed_subvol)
        # Set subvol to 0 for a plain (non-distributed) disperse volume
        hashed_subvol = 0
        return (objectname, ret['volume_subvols'], hashed_subvol)
    def test_file_access(self):
        """
        Test file access.
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # get subvol list
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "failed to get subvols")

        # create a file
        srcfile = mountpoint + '/testfile'
        ret, _, err = g.run(self.clients[0], ("touch %s" % srcfile))
        self.assertEqual(ret, 0, ("File creation failed for %s err %s",
                                  srcfile, err))
        g.log.info("testfile creation successful")

        # find hashed subvol
        srchashed, scount = find_hashed_subvol(subvols, "/", "testfile")
        self.assertIsNotNone(srchashed, "could not find srchashed")
        g.log.info("hashed subvol for srcfile %s subvol count %s",
                   srchashed._host, str(scount))

        # rename the file such that the new name hashes to a new subvol
        tmp = find_new_hashed(subvols, "/", "testfile")
        self.assertIsNotNone(tmp, "could not find new hashed for dstfile")
        g.log.info("dst file name : %s dst hashed_subvol : %s "
                   "subvol count : %s", tmp.newname,
                   tmp.hashedbrickobject._host, str(tmp.subvol_count))

        dstname = str(tmp.newname)
        dstfile = mountpoint + "/" + dstname
        dsthashed = tmp.hashedbrickobject
        dcount = tmp.subvol_count
        ret, _, err = g.run(self.clients[0], ("mv %s %s" %
                                              (srcfile, dstfile)))
        self.assertEqual(ret, 0, ("rename failed for %s err %s",
                                  srcfile, err))
        g.log.info("cmd: mv srcfile dstfile successful")

        # check that on dsthash_subvol the file is a linkto file
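        # (DHT linkto files carry only the sticky bit, i.e. mode 1000)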
        filepath = dsthashed._fqpath + "/" + dstname
        file_stat = get_file_stat(dsthashed._host, filepath)
        self.assertEqual(file_stat['access'], "1000", ("Expected file "
                                                       "permission to be 1000"
                                                       " on subvol %s",
                                                       dsthashed._host))
        g.log.info("dsthash_subvol has the expected linkto file")

        # check on srchashed the file is a data file
        filepath = srchashed._fqpath + "/" + dstname
        file_stat = get_file_stat(srchashed._host, filepath)
        self.assertNotEqual(file_stat['access'], "1000", ("Expected file "
                                                          "permission not to "
                                                          "be 1000 on subvol "
                                                          "%s",
                                                          srchashed._host))

        # Bring down the hashed subvol of dstfile(linkto file)
        ret = bring_bricks_offline(self.volname, subvols[dcount])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[dcount]))
        g.log.info('dst subvol %s is offline', subvols[dcount])

        # Need to access the file through a fresh lookup through a new mount
        # create a new dir(choosing server to do a mount)
        ret, _, _ = g.run(self.mnode, ("mkdir -p /mnt"))
        self.assertEqual(ret, 0, ('mkdir of mount dir failed'))
        g.log.info("mkdir of mount dir succeeded")

        # do a temp mount
        ret = mount_volume(self.volname, self.mount_type, "/mnt",
                           self.mnode, self.mnode)
        self.assertTrue(ret, ('temporary mount failed'))
        g.log.info("temporary mount succeeded")

        # check that file is accessible (stat)
        ret, _, _ = g.run(self.mnode, ("stat /mnt/%s" % dstname))
        self.assertEqual(ret, 0, ('stat failed for dst file %s', dstname))
        g.log.info("stat on /mnt/%s successful", dstname)

        # cleanup temporary mount
        ret = umount_volume(self.mnode, "/mnt")
        self.assertTrue(ret, ('umount of temporary mount failed'))
        g.log.info("umount successful")

        # Bring up the hashed subvol
        ret = bring_bricks_online(self.mnode, self.volname, subvols[dcount],
                                  bring_bricks_online_methods=None)
        self.assertTrue(ret, "Error in bringing back subvol online")
        g.log.info('Subvol is back online')

        # now bring down the cached subvol
        ret = bring_bricks_offline(self.volname, subvols[scount])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[scount]))
        g.log.info('target subvol %s is offline', subvols[scount])

        # file access should fail
        ret, _, _ = g.run(self.clients[0], ("stat %s" % dstfile))
        self.assertEqual(ret, 1, ('Expected stat to fail for file %s',
                                  dstfile))
        g.log.info("dstfile access failed as expected")
    def test_one_brick_full_add_brick_rebalance(self):
        """
        Test case:
        1. Create a pure distribute volume with 3 bricks.
        2. Start it and mount it on client.
        3. Fill one disk of the volume till it's full
        4. Add brick to volume, start rebalance and wait for it to complete.
        5. Check arequal checksum before and after add brick should be same.
        6. Check if link files are present on bricks or not.
        """
        # Fill one brick till it is full
        bricks = get_all_bricks(self.mnode, self.volname)

        # Calculate the usable size and fill till it reaches
        # min free limit
        usable_size = get_usable_size_per_disk(bricks[0])
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        fname = "abc"

        # Create directories in hierarchy
        dirp = "/dir1/dir2/"
        path = "{}{}".format(self.mounts[0].mountpoint, dirp)
        ret = mkdir(self.mounts[0].client_system, path, parents=True)
        self.assertTrue(ret, "Failed to create dir hierarchy")

        for _ in range(0, usable_size):

            # Create files inside directories
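            # Regenerate the name until it hashes to subvols[0] so that only
            # that brick gets filled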
            while (subvols[find_hashed_subvol(subvols, dirp, fname)[1]][0] !=
                   subvols[0][0]):
                fname = self._get_random_string()
            ret, _, _ = g.run(self.mounts[0].client_system,
                              "fallocate -l 1G {}{}".format(path, fname))
            self.assertFalse(ret, "Failed to fill disk to min free limit")
            fname = self._get_random_string()
        g.log.info("Disk filled up to min free limit")

        # Collect arequal checksum before ops
        arequal_checksum_before = collect_mounts_arequal(self.mounts[0])

        # Add brick to volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to add brick on volume %s"
                        % self.volname)

        # Trigger rebalance and wait for it to complete
        ret, _, _ = rebalance_start(self.mnode, self.volname,
                                    force=True)
        self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
                         % self.volname)

        # Wait for rebalance to complete
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
                                             timeout=1800)
        self.assertTrue(ret, "Rebalance is not yet complete on the volume "
                             "%s" % self.volname)
        g.log.info("Rebalance successfully completed")

        # Check for data loss by comparing arequal before and after ops
        arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
        self.assertEqual(arequal_checksum_before, arequal_checksum_after,
                         "arequal checksum is NOT MATCHNG")
        g.log.info("arequal checksum is SAME")

        # Check if linkto files exist or not as rebalance is already
        # completed we shouldn't be seeing any linkto files
        for brick in bricks:
            node, path = brick.split(":")
            path += dirp
            list_of_files = get_dir_contents(node, path)
            self.assertIsNotNone(list_of_files, "Unable to get files")
            for filename in list_of_files:
                ret = get_dht_linkto_xattr(node, "{}{}".format(path,
                                                               filename))
                self.assertIsNone(
                    ret, "Unexpected: dht linkto xattr found on {}{}".format(
                        path, filename))
    def test_rmdir_dir_when_hash_nonhash_vol_down(self):
        """
        case -2:
        - create dir1 and dir2
        - bring down a non-hashed subvol for dir1
        - bring down the hashed subvol for dir2
        - rmdir dir1 should fail with ENOTCONN
        - rmdir dir2 should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create dir1 and dir2
        directory_list = []
        for number in range(1, 3):
            directory_list.append('{}/dir{}'.format(self.mountpoint, number))
            ret = mkdir(self.clients[0], directory_list[-1])
            self.assertTrue(ret, ('mkdir failed for %s '
                                  % directory_list[-1]))
            g.log.info("mkdir of directory %s successful",
                       directory_list[-1])

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols, "/",
                                                        "dir1")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir1 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[0])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[0]))
        g.log.info("rmdir of directory %s failed as expected",
                   directory_list[0])

        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Unmounting and Mounting the volume back to Heal
        ret, _, err = umount_volume(self.clients[1], self.mountpoint)
        self.assertFalse(ret, "Error in creating temp mount %s" % err)

        ret, _, err = mount_volume(self.volname,
                                   mtype='glusterfs',
                                   mpoint=self.mountpoint,
                                   mserver=self.servers[0],
                                   mclient=self.clients[1])
        self.assertFalse(ret, "Error in creating temp mount")

        ret, _, _ = g.run(self.clients[1], ("ls %s/dir1" % self.mountpoint))
        self.assertEqual(ret, 0, "Error in lookup for dir1")
        g.log.info("lookup successful for dir1")

        # This confirms that healing is done on dir1
        ret = validate_files_in_dir(self.clients[0],
                                    directory_list[0],
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "validate_files_in_dir for dir1 failed")
        g.log.info("healing successful for dir1")

        # Bring down the hashed subvol
        # Find a hashed subvolume(or brick)
        hashed_subvol, count = find_hashed_subvol(self.subvols, "/", "dir2")
        self.assertIsNotNone(hashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("hashed_subvol %s", hashed_subvol._host)

        # Bring hashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir2 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[1])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[1]))
        g.log.info("rmdir of dir2 directory %s failed as expected",
                   directory_list[1])

        # Cleanup
        # Bring up the subvol - restart the volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Delete dirs
        for directory in directory_list:
            ret = rmdir(self.clients[0], directory)
            self.assertTrue(ret, ('rmdir failed for %s ' % directory))
            g.log.info("rmdir of directory %s successful", directory)
    def test_file_rename_when_dest_not_hash_to_src_or_src_link_subvol(self):
        """
        Case 3:
        - Destination file does not exist
        - Source link file is stored on hashed sub volume(s1) and Source
          file is stored on another subvolume(s2)
        - Destination file should be hashed to some other subvolume(s3)
          (should not be same subvolumes mentioned in above condition)
             mv <source_file> <destination_file>
        - Source file should be renamed to destination file
        - source link file should be removed.
        - Destination link file should be created on its hashed
          subvolume(s3)
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals
        # pylint: disable=unsubscriptable-object

        # Create source file and get its hashed subvol (s2)
        _, count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Rename the file to create link in hashed subvol -(s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "could not find new hashed for dstfile")
        count2 = new_hashed.subvol_count
        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(src_link_subvol._fqpath,
                                                    str(new_hashed.newname))))

        # find a subvol (s3) other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (count, count2):
                subvol_new = brickdir
                break

        new_hashed2 = find_specific_hashed(self.subvols, "/", subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Rename the source file to the destination file
        source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        hashed_subvol_after_rename, rename_count = (find_hashed_subvol(
            self.subvols, "/", str(new_hashed2.newname)))
        self.assertNotEqual(count2, rename_count,
                            "The subvols for src and dest are same.")

        # check that the source link file is removed.
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The New hashed volume {} still have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))
        g.log.info("The source link file is removed")

        # Check Destination link file is created on its hashed sub-volume(s3)
        ret = self._verify_link_file_exists(hashed_subvol_after_rename,
                                            str(new_hashed2.newname))
        self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
                              "expected linkto file {}".format(
                                  hashed_subvol_after_rename._fqpath,
                                  str(new_hashed2.newname))))
        g.log.info("Destinaion link is created in desired subvol")
    def test_file_rename_when_src_link_and_dest_file_hash_same_subvol(self):
        """
        Case 5:
        - Destination file does not exist
        - Source link file is stored on hashed sub volume(s1) and Source
          file is stored on another subvolume(s2)
        - Destination file should be hashed to same subvolume(s1)
            mv <source_file> <destination_file>
        - Source file should be renamed to destination file
        - Source link file should be removed.
        - Destination link file should be created on its
          hashed subvolume(s1)
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create source file and get hashed subvol (s2)
        _, count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Rename the file to create link in another subvol - (s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed, ("could not find new hashed subvol "
                                          "for {}".format(source_file)))

        self.assertNotEqual(count, new_hashed.subvol_count,
                            "New file should hash to different sub-volume")

        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))

        # Get a file name to hash to the subvol s1
        new_hashed2 = find_specific_hashed(self.subvols, "/", src_link_subvol,
                                           new_hashed.newname)
        self.assertIsNotNone(new_hashed2,
                             ("Couldn't find a name hashed to the"
                              " given subvol {}".format(src_link_subvol)))

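        # Verify the new destination name hashes to the same subvol (s1)
        # that holds the source link file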
        _, rename_count = (find_hashed_subvol(self.subvols, "/",
                                              str(new_hashed2.newname)))
        self.assertEqual(new_hashed.subvol_count, rename_count,
                         "The subvols for src and dest are not same.")

        # Move the source file to the new file name
        source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(ret, "Failed to move file")

        # check that the source link file is removed.
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The hashed volume {} still have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))
        g.log.info("The source link file is removed")

        # Check Destination link file is created on its hashed sub-volume(s1)
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed2.newname))))
        g.log.info("Destinaion link is created in desired subvol")
    def test_mkdir_with_subvol_down(self):
        '''
        Test mkdir hashed to a down subvol
        '''
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-statements
        # pylint: disable=W0212
        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # directory that needs to be created
        parent_dir = mountpoint + '/parent'
        child_dir = mountpoint + '/parent/child'

        # get hashed subvol for name "parent"
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        hashed, count = find_hashed_subvol(subvols, "/", "parent")
        self.assertIsNotNone(hashed, "Could not find hashed subvol")

        # bring target_brick offline
        bring_bricks_offline(self.volname, subvols[count])
        ret = are_bricks_offline(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s', subvols[count]))
        g.log.info('target subvol is offline')

        # create parent dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertNotEqual(
            ret, 0, ('Expected mkdir of %s to fail with %s', parent_dir, err))
        g.log.info('mkdir of dir %s failed as expected', parent_dir)

        # check that parent_dir does not exist on any bricks and client
        brickobject = create_brickobjectlist(subvols, "/")
        for brickdir in brickobject:
            adp = "%s/parent" % brickdir.path
            bpath = adp.split(":")
            self.assertFalse(
                file_exists(brickdir._host, bpath[1]),
                ('Expected dir %s not to exist on servers', parent_dir))

        for client in self.clients:
            self.assertFalse(
                file_exists(client, parent_dir),
                ('Expected dir %s not to exist on clients', parent_dir))

        g.log.info('dir %s does not exist on mount as expected', parent_dir)

        # Bring up the subvols and create parent directory
        bring_bricks_online(self.mnode,
                            self.volname,
                            subvols[count],
                            bring_bricks_online_methods=None)
        ret = are_bricks_online(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, ("Error in bringing back subvol %s online", subvols[count]))
        g.log.info('Subvol is back online')

        ret, _, _ = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertEqual(ret, 0,
                         ('Expected mkdir of %s to succeed', parent_dir))
        g.log.info('mkdir of dir %s successful', parent_dir)

        # get hash subvol for name "child"
        hashed, count = find_hashed_subvol(subvols, "parent", "child")
        self.assertIsNotNone(hashed, "Could not find hashed subvol")

        # bring target_brick offline
        bring_bricks_offline(self.volname, subvols[count])
        ret = are_bricks_offline(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s', subvols[count]))
        g.log.info('target subvol is offline')

        # create child dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % child_dir))
        self.assertNotEqual(
            ret, 0, ('Expected mkdir of %s to fail with %s', child_dir, err))
        g.log.info('mkdir of dir %s failed as expected', child_dir)

        # check if child_dir exists on any bricks
        for brickdir in brickobject:
            adp = "%s/parent/child" % brickdir.path
            bpath = adp.split(":")
            self.assertFalse(
                file_exists(brickdir._host, bpath[1]),
                ('Expected dir %s not to exist on servers', child_dir))
        for client in self.clients:
            self.assertFalse(file_exists(client, child_dir),
                             ('Expected dir %s not to exist on clients',
                              child_dir))

        g.log.info('dir %s does not exist on mount as expected', child_dir)
    def test_brick_full_add_brick_rebalance(self):
        """
        Test case:
        1. Create a volume, start it and mount it.
        2. Create a data set on the client node such that all the available
           space is used and "No space left on device" error is generated.
        3. Set cluster.min-free-disk to 30%.
        4. Add bricks to the volume, trigger rebalance and wait for rebalance
           to complete.
        """
        # Create a data set on the client node such that all the available
        # space is used and "No space left on device" error is generated
        bricks = get_all_bricks(self.mnode, self.volname)

        # Calculate the usable size and fill till it reaches
        # min free limit
        usable_size = get_usable_size_per_disk(bricks[0])
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        filename = "abc"
        for subvol in subvols:
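            # Regenerate the file name until it no longer hashes to the
            # current subvol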
            while (subvols[find_hashed_subvol(subvols, "/", filename)[1]] ==
                   subvol):
                filename = self._get_random_string()
            ret, _, err = g.run(self.mounts[0].client_system,
                                "fallocate -l {}G {}/{}".format(
                                    usable_size, self.mounts[0].mountpoint,
                                    filename))
            err_msg = 'No space left on device'
            if ret and err_msg in err:
                ret = 0
            self.assertFalse(ret, "Failed to fill disk to min free limit")
        g.log.info("Disk filled up to min free limit")

        # Try to perform I/O from mount point (This should fail)
        ret, _, _ = g.run(self.mounts[0].client_system,
                          "fallocate -l 5G {}/mfile".format(
                              self.mounts[0].mountpoint))
        self.assertTrue(ret,
                        "Unexpected: Able to do I/O even when disks are "
                        "filled to min free limit")
        g.log.info("Expected: Unable to perfrom I/O as min free disk is hit")

        # Set cluster.min-free-disk to 30%
        ret = set_volume_options(self.mnode, self.volname,
                                 {'cluster.min-free-disk': '30%'})
        self.assertTrue(ret, "Failed to set cluster.min-free-disk to 30%")

        # Add brick to volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to add brick on volume %s"
                        % self.volname)

        # Trigger rebalance and wait for it to complete
        ret, _, _ = rebalance_start(self.mnode, self.volname,
                                    force=True)
        self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
                         % self.volname)

        # Wait for rebalance to complete
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
                                             timeout=1200)
        self.assertTrue(ret, "Rebalance is not yet complete on the volume "
                             "%s" % self.volname)
        g.log.info("Rebalance successfully completed")