    def test_file_rename_dest_exist_and_not_hash_src_srclink_subvol(self):
        """
        Case 8:
        - Destination file should exist
        - Source file is hashed on subvolume (s1) and
          cached on another subvolume (s2)
        - Destination file should be hashed to some other subvolume (s3)
          (should not be the same subvolumes mentioned in the above conditions)
             mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Source hashed file should be removed
        - Destination hashed file should be created on its hashed subvolume (s3)
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals
        # pylint: disable=unsubscriptable-object

        # Create source file and get its hashed subvol (s2)
        _, count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new name that hashes to another subvol (s1); renaming the
        # file to it will create a linkto file on s1
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "could not find new hashed for dstfile")
        count2 = new_hashed.subvol_count
        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(src_link_subvol._fqpath,
                                                    str(new_hashed.newname))))

        # Find a subvol (s3) other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (count, count2):
                subvol_new = brickdir
                break

        new_hashed2 = find_specific_hashed(self.subvols, "/", subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Create destination file in a new subvol (s3)
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))

        # Verify the destination subvol (s3) is not the same as s1
        self.assertNotEqual(
            count2, dest_count,
            ("The subvols for source hashed: {} and dest: {} are the "
             "same".format(count2, dest_count)))
        # Verify the destination subvol (s3) is not the same as s2
        self.assertNotEqual(
            count, dest_count,
            ("The subvols for source cached: {} and dest: {} are the "
             "same".format(count, dest_count)))

        # Rename the source file to the destination file
        source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # The destination's data file is replaced, but a file entry with the
        # destination name should still be present on its hashed subvol (s3)
        # as a linkto file (verified separately below)
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed2.newname))
        self.assertTrue(ret, ("Destination file {} is not present on subvol"
                              " {}".format(str(new_hashed2.newname),
                                           dest_hashed_subvol._fqpath)))
        g.log.info("The destination file entry is present on its hashed "
                   "subvol as expected")

        # Check that the source link file is removed.
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The source hashed subvol {} still has the "
                  "linkto file {}".format(src_link_subvol._fqpath,
                                          str(new_hashed.newname))))
        g.log.info("The source link file is removed")

        # Check Destination link file is created on its hashed sub-volume(s3)
        ret = self._verify_link_file_exists(dest_hashed_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(
            ret, ("The destination hashed subvol {} doesn't have the "
                  "expected linkto file {}".format(dest_hashed_subvol._fqpath,
                                                   str(new_hashed2.newname))))
        g.log.info("Destination link is created in the desired subvol")
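
    # The tests above call a _create_file_and_get_hashed_subvol() helper that
    # is not shown in these snippets. A hedged sketch of what it could look
    # like, built only from calls already used here (g.run,
    # find_hashed_subvol); the method name with the _sketch suffix is
    # hypothetical and the real helper may differ:
    def _create_file_and_get_hashed_subvol_sketch(self, file_name):
        """Hypothetical helper: create file_name on the mount and return
        (hashed_brickdir, subvol_count, full_path_on_mount)."""
        file_path = "{}/{}".format(self.mount_point, file_name)
        ret, _, err = g.run(self.mounts[0].client_system,
                            "touch {}".format(file_path))
        self.assertEqual(ret, 0,
                         "touch of {} failed: {}".format(file_path, err))
        hashed_subvol, count = find_hashed_subvol(self.subvols, "/",
                                                  file_name)
        self.assertIsNotNone(hashed_subvol,
                             "Could not find hashed subvol for "
                             "{}".format(file_name))
        return hashed_subvol, count, file_path
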
    def test_mkdir_with_subvol_down(self):
        '''
        Test mkdir hashed to a down subvol
        '''
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-statements
        # pylint: disable=W0212
        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # directory that needs to be created
        parent_dir = mountpoint + '/parent'
        child_dir = mountpoint + '/parent/child'

        # get hashed subvol for name "parent"
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        hashed, count = find_hashed_subvol(subvols, "/", "parent")
        self.assertIsNotNone(hashed, "Could not find hashed subvol")

        # bring target_brick offline
        bring_bricks_offline(self.volname, subvols[count])
        ret = are_bricks_offline(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s', subvols[count]))
        g.log.info('target subvol is offline')

        # create parent dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertNotEqual(
            ret, 0, ('Expected mkdir of %s to fail with %s', parent_dir, err))
        g.log.info('mkdir of dir %s failed as expected', parent_dir)

        # check that parent_dir does not exist on any bricks and client
        brickobject = create_brickobjectlist(subvols, "/")
        for brickdir in brickobject:
            adp = "%s/parent" % brickdir.path
            bpath = adp.split(":")
            self.assertTrue(
                (file_exists(brickdir._host, bpath[1])) == 0,
                ('Expected dir %s not to exist on servers', parent_dir))

        for client in self.clients:
            self.assertTrue(
                (file_exists(client, parent_dir)) == 0,
                ('Expected dir %s not to exist on clients', parent_dir))

        g.log.info('dir %s does not exist on mount as expected', parent_dir)

        # Bring up the subvols and create parent directory
        bring_bricks_online(self.mnode,
                            self.volname,
                            subvols[count],
                            bring_bricks_online_methods=None)
        ret = are_bricks_online(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, ("Error in bringing back subvol %s online", subvols[count]))
        g.log.info('Subvol is back online')

        ret, _, _ = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertEqual(ret, 0,
                         ('Expected mkdir of %s to succeed', parent_dir))
        g.log.info('mkdir of dir %s successful', parent_dir)

        # get hash subvol for name "child"
        hashed, count = find_hashed_subvol(subvols, "parent", "child")
        self.assertIsNotNone(hashed, "Could not find hashed subvol")

        # bring target_brick offline
        bring_bricks_offline(self.volname, subvols[count])
        ret = are_bricks_offline(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s', subvols[count]))
        g.log.info('target subvol is offline')

        # create child dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % child_dir))
        self.assertNotEqual(
            ret, 0, ('Expected mkdir of %s to fail with %s', child_dir, err))
        g.log.info('mkdir of dir %s failed as expected', child_dir)

        # check if child_dir exists on any bricks
        for brickdir in brickobject:
            adp = "%s/parent/child" % brickdir.path
            bpath = adp.split(":")
            self.assertTrue(
                (file_exists(brickdir._host, bpath[1])) == 0,
                ('Expected dir %s not to exist on servers', child_dir))
        for client in self.clients:
            self.assertTrue(
                (file_exists(client, child_dir)) == 0,
                ('Expected dir %s not to exist on clients', child_dir))

        g.log.info('dir %s does not exist on mount as expected', child_dir)
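
    # The two "must not exist on any brick or client" loops in the test
    # above repeat the same host:path handling. A hedged sketch of a helper
    # that could fold them together; the name is hypothetical and it only
    # relies on brickdir.path being of the form "host:/brick/path", as the
    # loops above already assume:
    def _assert_path_absent_everywhere_sketch(self, brickobject, rel_path,
                                              abs_path):
        """Hypothetical helper: assert rel_path is absent on every brick and
        abs_path is absent on every client."""
        for brickdir in brickobject:
            host, brick_root = brickdir.path.split(":")
            self.assertFalse(
                file_exists(host, "{}/{}".format(brick_root, rel_path)),
                "Expected {} not to exist on brick {}".format(
                    rel_path, brickdir.path))
        for client in self.clients:
            self.assertFalse(
                file_exists(client, abs_path),
                "Expected {} not to exist on client {}".format(
                    abs_path, client))
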
    def test_afr_dir_entry_creation_with_subvol_down(self):
        """
        1. Create a distributed-replicated(3X3)/distributed-arbiter(3X(2+1))
           and mount it on one client
        2. Kill 3 bricks corresponding to the 1st subvol
        3. Unmount and remount the volume on the same client
        4. Create deep dir from mount point
           mkdir -p dir1/subdir1/deepdir1
        5. Create files under dir1/subdir1/deepdir1; touch <filename>
        6. Now bring all sub-vols up by volume start force
        7. Validate backend bricks for dir creation, the subvol which is
           offline will have no dirs created, whereas other subvols will have
           dirs created from step 4
        8. Trigger heal from client by "#find . | xargs stat"
        9. Verify that the directory entries are created on all back-end bricks
        10. Create new dir (dir2) on location dir1/subdir1/deepdir1
        11. Trigger rebalance and wait for the completion
        12. Check backend bricks for all entries of dirs
        13. Check if files are getting created on the subvol which was offline
        """
        # Bring down first subvol of bricks offline
        self.subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        first_subvol = self.subvols[0]
        ret = bring_bricks_offline(self.volname, first_subvol)
        self.assertTrue(
            ret, "Unable to bring {} bricks offline".format(first_subvol))

        # Check bricks are offline or not
        ret = are_bricks_offline(self.mnode, self.volname, first_subvol)
        self.assertTrue(ret, "Bricks {} are still online".format(first_subvol))

        # Unmount and remount the volume
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint)
        self.assertFalse(ret, "Failed to unmount volume.")
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mounts[0].mountpoint, self.mnode,
                                 self.mounts[0].client_system)
        self.assertFalse(ret, "Failed to remount volume.")
        g.log.info('Successfully umounted and remounted volume.')

        # A short sleep is required here, otherwise the file creation
        # below fails
        sleep(2)

        # Create dir `dir1/subdir1/deepdir1` on the mountpoint
        directory1 = "dir1/subdir1/deepdir1"
        path = self.mounts[0].mountpoint + "/" + directory1
        ret = mkdir(self.mounts[0].client_system, path, parents=True)
        self.assertTrue(ret, "Directory {} creation failed".format(path))

        # Create files on the 2nd and 3rd subvols which are online
        brickobject = create_brickobjectlist(self.subvols, directory1)
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        self._create_number_of_files_on_the_subvol(brickobject[1],
                                                   directory1,
                                                   5,
                                                   mountpath=path)
        self._create_number_of_files_on_the_subvol(brickobject[2],
                                                   directory1,
                                                   5,
                                                   mountpath=path)

        # Bring bricks online using volume start force
        ret, _, err = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, err)
        g.log.info("Volume: %s started successfully", self.volname)

        # Check all bricks are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, "Few process after volume start are offline for "
            "volume: {}".format(self.volname))

        # Validate Directory is not created on the bricks of the subvol which
        # is offline
        for subvol in self.subvols:
            self._check_file_exists(subvol,
                                    "/" + directory1,
                                    exists=(subvol != first_subvol))

        # Trigger heal from the client
        cmd = "cd {}; find . | xargs stat".format(self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, err)

        # Validate the directory1 is present on all the bricks
        for subvol in self.subvols:
            self._check_file_exists(subvol, "/" + directory1, exists=True)

        # Create new dir (dir2) on location dir1/subdir1/deepdir1
        directory2 = "/" + directory1 + '/dir2'
        path = self.mounts[0].mountpoint + directory2
        ret = mkdir(self.mounts[0].client_system, path, parents=True)
        self.assertTrue(ret, "Directory {} creation failed".format(path))

        # Trigger rebalance and validate the completion
        ret, _, err = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, err)
        g.log.info("Rebalance on volume %s started successfully", self.volname)
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(
            ret, "Rebalance didn't complete on the volume: "
            "{}".format(self.volname))

        # Validate all dirs are present on all bricks in each subvols
        for subvol in self.subvols:
            for each_dir in ("/" + directory1, directory2):
                self._check_file_exists(subvol, each_dir, exists=True)

        # Validate if files are getting created on the subvol which was
        # offline
        self._create_number_of_files_on_the_subvol(brickobject[0],
                                                   directory1,
                                                   5,
                                                   mountpath=path)
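
    # Step 8 of the test above only triggers heal through lookups
    # ("find . | xargs stat"); a stricter variant could also wait for the
    # heal to finish before checking the backend bricks. A hedged sketch,
    # assuming monitor_heal_completion from glustolibs.gluster.heal_libs is
    # available (it is not imported by the snippets shown here):
    def _wait_for_heal_sketch(self):
        """Hypothetical helper: block until self-heal completes."""
        from glustolibs.gluster.heal_libs import monitor_heal_completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Heal did not complete on volume "
                             "{}".format(self.volname))
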
    def test_rm_file_when_nonhash_vol_down(self):
        """
        Case 3:
        - create parent
        - mkdir parent/child
        - touch parent/child/file
        - bring down a subvol where the file is not present
        - rm -rf parent
            - Only the file should be deleted
            - rm -rf of parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Create child dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # Create a file under child_dir
        file_one = child_dir + '/file_one'
        ret, _, err = g.run(self.clients[0], ("touch %s" % file_one))
        self.assertFalse(ret, ('touch failed for %s err: %s' %
                               (file_one, err)))

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "parent/child",
                                                        "file_one")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rm -rf' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected"
                   " with err %s", parent_dir, err)

        brickobject = create_brickobjectlist(self.subvols, "parent/child")
        self.assertIsNotNone(brickobject,
                             "could not create brickobject list")
        # Make sure file_one is deleted
        for brickdir in brickobject:
            dir_path = "%s/parent/child/file_one" % brickdir.path
            brick_path = dir_path.split(":")
            self.assertTrue((file_exists(brickdir._host, brick_path[1])) == 0,
                            ('Expected file %s not to exist on servers'
                             % file_one))
        g.log.info("file is deleted as expected")

        # Cleanup
        # Bring up the subvol - restart volume
        ret, _, err = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, ('Error in force starting the volume: %s'
                                  % err))
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
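
    # find_nonhashed_subvol() is used above but its implementation is not
    # shown; conceptually it is the complement of find_hashed_subvol(). A
    # hedged sketch built only from helpers already used in these tests (the
    # real library function may work differently):
    def _find_nonhashed_subvol_sketch(self, subvols, parent, name):
        """Hypothetical helper: return (brickdir, count) of a subvol the
        given name does NOT hash to, or (None, None)."""
        hashed, hashed_count = find_hashed_subvol(subvols, parent, name)
        if hashed is None:
            return None, None
        brickobject = create_brickobjectlist(subvols, parent)
        for count, brickdir in enumerate(brickobject):
            if count != hashed_count:
                return brickdir, count
        return None, None
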
    def test_file_rename_when_dest_hash_src_hashed_but_cache_diff(self):
        """
        - Destination file should exist
        - Source file is stored on its hashed subvolume (s1) itself
        - Destination file should be hashed to some other subvolume (s2)
        - Destination file hashed on subvolume (s2) but cached on some other
          subvolume (s3) (neither s1 nor s2)
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be there on hashed subvolume and
          should link to new destination file
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s1)
        _, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for the destination that hashes to some
        # other subvol (s3)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Create Dest file in S3
        dest_cached, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))

        # Verify S1 and S3 are not same
        self.assertNotEqual(src_count, dest_count,
                            ("The destination file is cached to the source "
                             "cached subvol"))

        # Find new name for dest file, that it hashes to some other subvol S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, dest_count):
                subvol_new = brickdir
                break

        dest_hashed = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(dest_hashed,
                             "could not find new hashed for dstfile")
        # Move dest to new name
        dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_file, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dest_file, dest)))

        # Move Source file to Dest
        ret = move_file(self.mounts[0].client_system, source_file, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, dest)))

        # Verify Destination File is removed
        ret = self._verify_file_exists(dest_cached,
                                       str(new_hashed.newname))
        self.assertFalse(ret, "The Source file is still present in {}"
                         .format(dest_cached._fqpath))

        # Verify Destination Link is present and points to new dest file
        ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
                                            str(dest_hashed.newname))
        self.assertTrue(ret, "The Dest link file is not present in {}"
                        .format(dest_hashed.hashedbrickobject._fqpath))

        file_path = dest_hashed.hashedbrickobject._fqpath + str(
            dest_hashed.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_hashed.hashedbrickobject._host, file_path,
            str(dest_hashed.newname)))
        self.assertTrue(ret, "The dest link file not pointing towards "
                             "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")
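
    # _verify_file_links_to_specified_destination() is used above but not
    # shown. One plausible way to check where a DHT linkto file points is to
    # read its trusted.glusterfs.dht.linkto xattr; this hedged sketch does
    # that with getfattr over g.run and only checks that the expected string
    # appears in the value (the suite's real helper may be stricter):
    def _linkto_points_to_sketch(self, host, file_path, expected):
        """Hypothetical helper: True if the linkto xattr of file_path
        contains the expected string."""
        cmd = ("getfattr --only-values -n trusted.glusterfs.dht.linkto "
               "{}".format(file_path))
        ret, out, _ = g.run(host, cmd)
        if ret:
            return False
        return expected in out
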
    def test_file_rename_when_dest_neither_hash_cache_to_src_subvols(self):
        """
        - Destination file should exist
        - Source file is hashed on subvolume (s1) and cached on
          another subvolume (s2)
        - Destination file should be hashed to some other subvolume (s3)
          (neither s1 nor s2)
        - Destination file hashed on subvolume (s3) but cached on the
          remaining subvolume (s4)
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be there on hashed subvolume
          and should link to new destination file
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s2)
        _, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new name for the source file so that it hashes to
        # another subvol (s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Rename the source file to the new file name
        src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, src_hashed)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(src_link_subvol._fqpath,
                                      str(new_hashed.newname))))

        # Destination file will be cached on s4.
        # Find a subvol (s4), other than s1 and s2, for the destination
        # file to be cached on
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count):
                subvol_new = brickdir
                break

        dest_cached = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(dest_cached,
                             "could not find new hashed for dstfile")
        # Create the destination file on s4
        _, _, dest_src = self._create_file_and_get_hashed_subvol(
            str(dest_cached.newname))

        # Verify the subvol (s4) is neither s1 nor s2
        self.assertNotEqual(dest_cached.subvol_count, new_hashed.subvol_count,
                            ("The subvol found for destination is same as "
                             "that of the source file hashed subvol"))
        self.assertNotEqual(dest_cached.subvol_count, src_count,
                            ("The subvol found for destination is same as "
                             "that of the source file cached subvol"))

        # Identify a name for dest that hashes to another subvol S3
        # Find a subvol (s3) for dest file to linkto, other than S1 and S2 and
        # S4
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count,
                                dest_cached.subvol_count):
                subvol_new = brickdir
                break

        dest_hashed = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(dest_hashed,
                             "could not find new hashed for dstfile")

        # Move dest to new name
        dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_src, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dest_src, dest)))

        # Move Source file to Dest
        src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, src, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(src, dest)))

        # Verify Destination File is removed
        ret = self._verify_file_exists(dest_cached.hashedbrickobject,
                                       str(dest_cached.newname))
        self.assertFalse(ret, "The Source file is still present in {}"
                         .format(dest_cached.hashedbrickobject._fqpath))

        # Verify Source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(ret, "The source link file is still present in {}"
                         .format(src_link_subvol._fqpath))

        # Verify Destination Link is present and points to new dest file
        ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
                                            str(dest_hashed.newname))
        self.assertTrue(ret, "The Dest link file is not present in {}"
                        .format(dest_hashed.hashedbrickobject._fqpath))

        file_path = dest_hashed.hashedbrickobject._fqpath + str(
            dest_hashed.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_hashed.hashedbrickobject._host, file_path,
            str(dest_hashed.newname)))
        self.assertTrue(ret, "The dest link file not pointing towards "
                             "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")
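
    # The "walk brickobject and take the first subvol whose index is not
    # s1/s2" loop appears in several of the tests in this file; a tiny
    # helper like this hedged sketch could replace it (the name is
    # hypothetical, not part of the original suite):
    def _pick_other_subvol_sketch(self, brickobject, excluded_counts):
        """Hypothetical helper: return (brickdir, count) of the first brick
        dir whose index is not in excluded_counts, else (None, None)."""
        for count, brickdir in enumerate(brickobject):
            if count not in excluded_counts:
                return brickdir, count
        return None, None
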
    def test_file_rename_when_dest_hash_src_cached(self):
        """
        - Destination file should exist
        - Source file is hashed on subvolume (s1) and cached on another
          subvolume (s2)
        - Destination file should be hashed to the subvolume where the
          source file is stored (s2)
        - Destination file hashed on subvolume (s2) but should be cached
          on some other subvolume (s3) than these two subvolumes
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be removed
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s2)
        src_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new name for the source file so that it hashes to
        # another subvol (s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Rename the source file to the new file name
        src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, src_hashed)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(src_link_subvol._fqpath,
                                      str(new_hashed.newname))))

        # Find a subvol (s3) for dest file to linkto, other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count):
                subvol_new = brickdir
                break

        new_hashed2 = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Create a file in the subvol S3
        dest_subvol, count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))

        # Verify the subvol is not same as S1 and S2
        self.assertNotEqual(count, src_count,
                            ("The subvol found for destination is same as that"
                             " of the source file cached subvol"))
        self.assertNotEqual(count, new_hashed.subvol_count,
                            ("The subvol found for destination is same as that"
                             " of the source file hashed subvol"))

        # Find a file name that hashes to S2
        dest_hashed = find_specific_hashed(self.subvols,
                                           "/",
                                           src_subvol)
        self.assertIsNotNone(dest_hashed,
                             "could not find new hashed for dstfile")

        # Rename destination to hash to S2 and verify
        dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_file, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dest_file, dest)))

        # Rename Source File to Dest
        ret = move_file(self.mounts[0].client_system, src_hashed, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(src_hashed, dest)))

        # Verify Destination File is removed
        ret = self._verify_file_exists(new_hashed2.hashedbrickobject,
                                       str(new_hashed2.newname))
        self.assertFalse(ret, "The Destination file is still present in {}"
                         .format(dest_subvol._fqpath))

        # Verify Source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(ret, "The source link file is still present in {}"
                         .format(src_link_subvol._fqpath))

        # Verify Destination Link is removed
        ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
                                            str(dest_hashed.newname))
        self.assertFalse(ret, "The Dest link file is still present in {}"
                         .format(dest_hashed.hashedbrickobject._fqpath))
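
    # _verify_link_file_exists() is called throughout these tests but not
    # shown. DHT linkto files are zero-byte entries with the sticky bit set,
    # so a minimal check could look like this hedged sketch (the suite's
    # real helper may also inspect the trusted.glusterfs.dht.linkto xattr):
    def _verify_link_file_exists_sketch(self, brickdir, file_name):
        """Hypothetical helper: True if file_name on the brick looks like a
        DHT linkto file (regular, empty, sticky bit set)."""
        path = "{}{}".format(brickdir._fqpath, file_name)
        cmd = "test -f {0} -a -k {0} -a ! -s {0}".format(path)
        ret, _, _ = g.run(brickdir._host, cmd)
        return ret == 0
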
    def test_file_rename_when_dest_doesnt_hash_src_cached_or_hashed(self):
        """
        - Destination file should exist
        - Source file is hashed on subvolume (s1) and cached on
          another subvolume (s2)
        - Destination file should be hashed to subvolume (s3) other
          than the above two subvolumes
        - Destination file hashed on subvolume (s3) but the destination
          file should be cached on the same subvolume (s2) where the
          source file is stored
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination file hashed on subvolume and should link
          to new destination file
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s2)
        src_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new name for the source file so that it hashes to
        # another subvol (s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(src_link_subvol._fqpath,
                                      str(new_hashed.newname))))

        # Identify a file name for dest to get stored in S2
        dest_cached_subvol = find_specific_hashed(self.subvols,
                                                  "/",
                                                  src_subvol)
        # Create the file with identified name
        _, _, dst_file = (
            self._create_file_and_get_hashed_subvol(
                str(dest_cached_subvol.newname)))
        # Verify it is cached on s2 itself
        self.assertEqual(dest_cached_subvol.subvol_count, src_count,
                         ("The subvol found for destination is not same as "
                          "that of the source file cached subvol"))

        # Find a subvol (s3) for dest file to linkto, other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count):
                subvol_new = brickdir
                break

        new_hashed2 = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Verify the subvol (s3) is neither the source cached subvol (s2)
        # nor the source hashed subvol (s1)
        self.assertNotEqual(new_hashed2.subvol_count, src_count,
                            ("The subvol found for destination is same as that"
                             " of the source file cached subvol"))
        self.assertNotEqual(new_hashed2.subvol_count, new_hashed.subvol_count,
                            ("The subvol found for destination is same as that"
                             " of the source file hashed subvol"))

        # Rename the dest file to the new file name
        dst_file_ln = "{}/{}".format(self.mount_point,
                                     str(new_hashed2.newname))
        ret = move_file(self.mounts[0].client_system, dst_file, dst_file_ln)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dst_file, dst_file_ln)))

        # Verify the Dest link file is stored on hashed sub volume(s3)
        dest_link_subvol = new_hashed2.hashedbrickobject
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(dest_link_subvol._fqpath,
                                      str(new_hashed2.newname))))

        # Move/Rename Source File to Dest
        src_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, src_file, dst_file)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(src_file, dst_file)))

        # Verify Source file is removed
        ret = self._verify_file_exists(src_subvol, "test_source_file")
        self.assertFalse(ret, "The source file is still present in {}"
                         .format(src_subvol._fqpath))

        # Verify Source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(ret, "The source link file is still present in {}"
                         .format(src_link_subvol._fqpath))

        # Verify the Destination link is on hashed subvolume
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(dest_link_subvol._fqpath,
                                      str(new_hashed2.newname))))

        # Verify the dest link file points to new destination file
        file_path = dest_link_subvol._fqpath + str(new_hashed2.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_link_subvol._host, file_path,
            str(dest_cached_subvol.newname)))
        self.assertTrue(ret, "The dest link file not pointing towards "
                             "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")