def _rename_src(self, new_src_name):
     """Rename the srcfile to a new name such that it hashes and
     caches to different subvols"""
     ret = move_file(self.client, "{}/srcfile".format(self.m_point),
                     ("{}/".format(self.m_point) + new_src_name))
     self.assertTrue(ret, ("Failed to move file srcfile and {}".format(
         new_src_name)))
    def test_file_rename_when_source_and_dest_hash_same_subvol(self):
        """
        Case 2:
        Rename a file to a name that hashes to the file's own hashed
        subvolume (s1):
            mv <source_file> <destination_file>
        The destination does not pre-exist, and after the rename the
        destination name must hash to the same subvolume (s1).
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create the source file and record which subvol (s1) it hashed to
        src_subvol, src_count, src_path = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Pick a destination name that hashes to that same subvol (s1)
        hashed_info = find_specific_hashed(self.subvols, "/", src_subvol)
        self.assertIsNotNone(hashed_info,
                             "could not find new hashed for destination file")

        # Perform the rename from the mount point
        dst_path = "{}/{}".format(self.mount_point, str(hashed_info.newname))
        ret = move_file(self.clients[0], src_path, dst_path)
        self.assertTrue(
            ret,
            "Failed to move files {} and {}".format(src_path, dst_path))

        # The renamed file must still hash to the original subvol (s1)
        _, post_count = find_hashed_subvol(self.subvols, "/",
                                           str(hashed_info.newname))
        self.assertEqual(src_count, post_count,
                         "The hashed subvols for src and dest are not same.")
    def test_dht_file_rename_dest_exists_src_and_dest_hash_same(self):
        """
        Case 7:
        - Destination file should exist
        - Source file is stored on hashed subvolume(s1) it self
        - Destination file should be hashed to same subvolume(s1)
        - Destination file is stored on hashed subvolume
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed to destination file
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create source file and get its hashed subvol (s1)
        source_hashed_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for destination file that hashes
        # to same subvol (s1)
        new_hashed = find_specific_hashed(self.subvols, "/",
                                          source_hashed_subvol)
        self.assertIsNotNone(
            new_hashed, "Couldn't find a new hashed subvol "
            "for destination file")

        # Create destination_file and get its hashed subvol (should be s1)
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))

        # Verify the subvols ARE the same for source and destination files
        # (case 7 requires both to hash to s1; the message below is the
        # failure text shown only when they differ)
        self.assertEqual(src_count, dest_count,
                         "The subvols for src and dest are not same.")

        # Rename the source file to the destination file
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move files {} and {}".format(source_file, dest_file)))

        # Verify the renamed file still hashes to the same subvol (s1)
        _, rename_count = find_hashed_subvol(self.subvols, "/",
                                             str(new_hashed.newname))
        self.assertEqual(dest_count, rename_count,
                         ("The subvols for source : {} and dest : {} are "
                          "not same.".format(source_hashed_subvol._fqpath,
                                             dest_hashed_subvol._fqpath)))

        # Check the destination path on the brick after the rename.
        # NOTE(review): True is asserted from _verify_file_exists here;
        # presumably it confirms the old dest data file was replaced by the
        # renamed source — confirm the helper's semantics.
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed.newname))
        self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
                              " : {}".format(str(new_hashed.newname),
                                             dest_hashed_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")
    def test_dht_file_rename_dest_exists_src_and_dest_hash_diff(self):
        """
        case 6 :
        - Destination file should exist
        - Source file is stored on hashed subvolume(s1) it self
        - Destination file should be hashed to some other subvolume(s2)
        - Destination file is stored on hashed subvolume
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination hashed file should be created on its hashed
          subvolume(s2)
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create source file and Get hashed subvol (s1)
        _, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for destination file, which hashes
        # to another subvol (s2)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "could'nt find new hashed for destination file")

        # create destination_file and get its hashed subvol (s2)
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))

        # Verify the subvols are not same for source and destination files
        # (case 6 requires src and dest to hash to different subvols)
        self.assertNotEqual(src_count, dest_count,
                            "The subvols for src and dest are same.")

        # Rename the source file to the destination file
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            "Failed to move files {} and {}".format(source_file, dest_file))

        # Check the destination path on the brick after the rename.
        # NOTE(review): True is asserted from _verify_file_exists here;
        # presumably it confirms the old dest data file was replaced by
        # the renamed source — confirm the helper's semantics.
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed.newname))
        self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
                              " : {}".format(str(new_hashed.newname),
                                             dest_hashed_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")

        # Verify the Destination link is found in new subvol (s2)
        ret = self._verify_link_file_exists(dest_hashed_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(str(
                      new_hashed.newname), dest_hashed_subvol._fqpath)))
        g.log.info("New hashed volume has the expected linkto file")
    def test_file_rename_when_source_and_dest_hash_diff_subvol(self):
        """
        case 1 :
        Rename a file to a name that hashes to a different subvolume:
        - the source file lives on its own hashed subvolume (s1)
        - the destination name hashes to another subvolume (s2)
            mv <source_file> <destination_file>
        After the rename the destination's hashed subvolume (s2) must
        hold a linkto file for it.
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create the source file; remember which subvol (s1) it hashed to
        _, src_count, src_path = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Pick a destination name hashing to some other subvol (s2)
        hashed_info = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(
            hashed_info,
            ("could'nt find new hashed for {}".format(src_path)))
        link_brick = hashed_info.hashedbrickobject

        # The destination must not hash to the source's subvol
        self.assertNotEqual(src_count, hashed_info.subvol_count,
                            "The subvols for src and dest are same.")

        # Perform the rename from the mount point
        dst_path = "{}/{}".format(self.mount_point, str(hashed_info.newname))
        ret = move_file(self.clients[0], src_path, dst_path)
        self.assertTrue(
            ret,
            ("Failed to move files {} and {}".format(src_path, dst_path)))

        # A linkto file must now exist on the destination's hashed subvol
        ret = self._verify_link_file_exists(link_brick,
                                            str(hashed_info.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(link_brick._fqpath,
                                                    str(hashed_info.newname))))
        g.log.info("New hashed volume has the expected linkto file")
    def test_file_rename_when_src_file_and_dest_file_hash_same_subvol(self):
        """
        Case 4:
        - Destination file does not exist
        - Source link file is stored on hashed sub volume(s1) and Source
          file is stored on another subvolume(s2)
        - Destination file should be hashed to same subvolume(s2)
            mv <source_file> <destination_file>
        - Source file should be renamed to destination file
        - source link file should be removed.
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create the source file; its hashed/cached subvol is s2
        source_hashed_subvol, count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a name hashing to a different subvol (s1); renaming to it
        # will leave the data on s2 and create a linkto file on s1
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(
            new_hashed,
            ("could not find new hashed for {}".format(source_file)))

        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))

        # Get a destination file name that hashes to the data subvol (s2)
        new_hashed2 = find_specific_hashed(self.subvols, "/",
                                           source_hashed_subvol)
        self.assertIsNotNone(
            new_hashed2, "Could not find a name hashed"
            "to the given subvol")

        # Sanity check: the chosen destination name indeed hashes to s2
        _, rename_count = (find_hashed_subvol(self.subvols, "/",
                                              str(new_hashed2.newname)))
        self.assertEqual(count, rename_count,
                         "The subvols for src and dest are not same.")

        # Move the (already renamed) source file to the destination name
        # that hashes to its cached subvol (s2)
        source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
        ret = move_file(self.clients[0], source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # check that the source link file is removed.
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The New hashed volume {} still have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))
        g.log.info("The source link file is removed")
# Example #7
    def test_file_rename_when_dest_neither_hash_cache_to_src_subvols(self):
        """
        - Destination file should exist
        - Source file hashed on sub volume(s1) and cached on
          another subvolume(s2)
        - Destination file should be hashed to some other subvolume(s3)
          (neither s1 nor s2)
        - Destination file hashed on subvolume(s3) but cached on
          remaining subvolume(s4)
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be there on hashed subvolume
          and should link to new destination file
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and get its cached subvol (s2)
        _, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for the source, which hashes
        # to another subvol (s1) — this creates the hashed/cached split
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Rename the source file to the new file name
        src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, src_hashed)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(src_link_subvol._fqpath,
                                      str(new_hashed.newname))))

        # Destination file cached on S4.
        # Find a subvol (s4) for dest file to linkto, other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count):
                subvol_new = brickdir
                break

        dest_cached = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(dest_cached,
                             "could not find new hashed for dstfile")
        # Create a file in S4 — where the destination data will be cached
        _, _, dest_src = self._create_file_and_get_hashed_subvol(
            str(dest_cached.newname))

        # Verify the subvol is not S2 and S1
        self.assertNotEqual(dest_cached.subvol_count, new_hashed.subvol_count,
                            ("The subvol found for destination is same as "
                             "that of the source file hashed subvol"))
        self.assertNotEqual(dest_cached.subvol_count, src_count,
                            ("The subvol found for destination is same as "
                             "that of the source file cached subvol"))

        # Identify a name for dest that hashes to another subvol S3
        # Find a subvol (s3) for dest file to linkto, other than S1, S2 and
        # S4
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count,
                                dest_cached.subvol_count):
                subvol_new = brickdir
                break

        dest_hashed = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        # BUG FIX: the original never validated this lookup; a None here
        # would surface later as an AttributeError on dest_hashed.newname
        # rather than a clear assertion failure.
        self.assertIsNotNone(dest_hashed,
                             "could not find new hashed for dstfile")

        # Move dest to new name
        dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_src, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dest_src, dest)))

        # Move Source file to Dest
        src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, src, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(src, dest)))

        # Verify Destination File is removed
        ret = self._verify_file_exists(dest_cached.hashedbrickobject,
                                       str(dest_cached.newname))
        self.assertFalse(ret, "The Source file is still present in {}"
                         .format(dest_cached.hashedbrickobject._fqpath))

        # Verify Source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(ret, "The source link file is still present in {}"
                         .format(src_link_subvol._fqpath))

        # Verify Destination Link is present and points to new dest file
        ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
                                            str(dest_hashed.newname))
        self.assertTrue(ret, "The Dest link file is not present in {}"
                        .format(dest_hashed.hashedbrickobject._fqpath))

        file_path = dest_hashed.hashedbrickobject._fqpath + str(
            dest_hashed.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_hashed.hashedbrickobject._host, file_path,
            str(dest_hashed.newname)))
        self.assertTrue(ret, "The dest link file not pointing towards "
                             "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")
# Example #8
    def test_quota_with_renamed_dir(self):
        """
        Verifying directory quota functionality with respect to
        the limit-usage option.
        If a directory has limit set on it and the same directory is renamed,
        then on doing a quota list the changed name should be reflected.

        * Enable quota on volume
        * Create a directory 'foo' from client
        * Set quota limit of 1GB on /foo
        * Check if quota limit set is correct
        * Rename directory 'foo' to 'bar' from client
        * Check if quota limit set on 'bar' is same as before
        """

        # Enable Quota on the volume (exit status 0 means success)
        g.log.info("Enabling Quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to enable Quota on volume %s" % self.volname)

        # Create a directory named 'foo' under any mount dir
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating dir named 'foo' from client %s", client)
        ret = mkdir(client, "%s/foo" % mount_dir)
        self.assertTrue(
            ret, "Failed to create dir under %s-%s" % (client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Set Quota Limit of 1GB for dir foo
        g.log.info(
            "Setting a quota limit of 1GB on directory 'foo' inside "
            "volume %s", self.volname)
        ret, _, _ = quota_limit_usage(self.mnode, self.volname, "/foo", '1GB')
        self.assertEqual(ret, 0, "Failed to set Quota for dir '/foo'")
        g.log.info("Set quota for dir '/foo' successfully")

        # Get the Quota list and check '/foo' has Quota Limit of 1GB
        # (1073741824 bytes == 1 GiB)
        g.log.info(
            "Validating if the Quota limit set is correct for the "
            "path '/foo' in volume %s", self.volname)
        ret = quota_validate(self.mnode,
                             self.volname,
                             path="/foo",
                             hard_limit=1073741824)
        # BUG FIX: the original passed a ("...%s", self.volname) tuple as
        # the assertion message, so the volume name was never substituted;
        # format the message with % instead.
        self.assertTrue(ret, ("Quota Limit of 1GB was not set properly on "
                              "the path '/foo' in volume %s" % self.volname))
        g.log.info(
            "Successfully Validated Quota Limit of 1GB is set on the "
            "path '/foo' in volume %s", self.volname)

        # Rename the dir foo to bar
        g.log.info("Renaming dir named 'foo' to 'bar' from client %s", client)
        ret = move_file(client, "%s/foo" % (mount_dir), "%s/bar" % (mount_dir))
        self.assertTrue(
            ret, "Failed to rename the directory 'foo' under "
            "%s-%s" % (client, mount_dir))
        g.log.info("Renamed the directory 'foo' to 'bar' successfully")

        # Again get the quota list to check if directory /bar is present
        g.log.info(
            "Validating if the Quota limit set is correct for the "
            "path '/bar' in volume %s", self.volname)
        ret = quota_validate(self.mnode,
                             self.volname,
                             path="/bar",
                             hard_limit=1073741824)
        self.assertTrue(ret, ("Failed to validate quota limit on the directory"
                              " 'bar'"))
        g.log.info(
            "Successfully Validated Quota Limit of 1GB is set on the "
            "path '/bar' in volume %s", self.volname)
    def test_gfind_renames(self):
        """
        Verifying the glusterfind functionality with renames of files.

        * Create a volume
        * Create a session on the volume
        * Create various files from mount point
        * Perform glusterfind pre
        * Perform glusterfind post
        * Check the contents of outfile
        * Rename the files created from mount point
        * Perform glusterfind pre
        * Perform glusterfind post
        * Check the contents of outfile
          Files renamed must be listed
        """

        # pylint: disable=too-many-statements
        # Creating a session for the volume
        g.log.info("Creating a session for the volume %s", self.volname)
        ret, _, _ = gfind_create(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Unexpected: Creation of a session for the "
                                  "volume %s failed" % self.volname))
        g.log.info("Successfully created a session for the volume %s",
                   self.volname)

        # Perform glusterfind list to check if session exists
        g.log.info("Performing glusterfind list to check if the session is "
                   "created")
        ret, _, _ = gfind_list(self.mnode,
                               volname=self.volname,
                               sessname=self.session)
        self.assertEqual(ret, 0, "Failed to list the glusterfind session")
        g.log.info("Successfully listed the glusterfind session")

        # Starting IO on the mounts: create file1..file10 of 1M each
        g.log.info("Creating Files on %s:%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        cmd = ("cd %s ; for i in `seq 1 10` ; "
               "do dd if=/dev/urandom of=file$i bs=1M count=1 ; "
               "done" % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Files created successfully on mountpoint")

        # Check if the files exist
        g.log.info("Checking the existence of files created during IO")
        for i in range(1, 11):
            ret = file_exists(self.mounts[0].client_system,
                              '%s/file%s' % (self.mounts[0].mountpoint, i))
            self.assertTrue(ret,
                            "Unexpected: File 'file%s' does not exist" % i)
            g.log.info("Successfully validated existence of 'file%s'", i)

        # NOTE(review): presumably gives the changelog time to record the
        # creations before the pre run — confirm whether 5s is sufficient
        # for all configurations.
        sleep(5)

        # Perform glusterfind pre for the session
        g.log.info("Performing glusterfind pre for the session %s",
                   self.session)
        ret, _, _ = gfind_pre(self.mnode,
                              self.volname,
                              self.session,
                              self.outfiles[0],
                              full=True,
                              noencode=True,
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        g.log.info("Checking if outfile created during glusterfind pre command"
                   " exists")
        ret = file_exists(self.mnode, self.outfiles[0])
        self.assertTrue(
            ret, "Unexpected: File '%s' does not exist" % self.outfiles[0])
        g.log.info("Successfully validated existence of '%s'",
                   self.outfiles[0])

        # Check if all the files are listed in the outfile
        # (check_if_pattern_in_file returns 0 when the pattern is found)
        for i in range(1, 11):
            ret = check_if_pattern_in_file(self.mnode, 'file%s' % i,
                                           self.outfiles[0])
            self.assertEqual(ret, 0, ("File 'file%s' not listed in %s" %
                                      (i, self.outfiles[0])))
            g.log.info("File 'file%s' listed in %s", i, self.outfiles[0])

        # Perform glusterfind post for the session
        g.log.info("Performing glusterfind post for the session %s",
                   self.session)
        ret, _, _ = gfind_post(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind post"))
        g.log.info("Successfully performed glusterfind post")

        # Rename the files created from mount point
        g.log.info("Renaming the Files on %s:%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        for i in range(1, 11):
            ret = move_file(
                self.mounts[0].client_system,
                "%s/file%s" % (self.mounts[0].mountpoint, i),
                "%s/renamed-file%s" % (self.mounts[0].mountpoint, i))
            self.assertTrue(ret, "Failed to rename file%s" % i)
        g.log.info("Successfully renamed all the files")

        # NOTE(review): same changelog-settling wait as above — confirm.
        sleep(5)

        # Perform glusterfind pre for the session (incremental this time:
        # no full=True, so only changes since the last post are reported)
        g.log.info("Performing glusterfind pre for the session %s",
                   self.session)
        ret, _, _ = gfind_pre(self.mnode,
                              self.volname,
                              self.session,
                              self.outfiles[1],
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        g.log.info("Checking if outfile created during glusterfind pre command"
                   " exists")
        ret = file_exists(self.mnode, self.outfiles[1])
        self.assertTrue(
            ret, "Unexpected: File '%s' does not exist" % self.outfiles[1])
        g.log.info("Successfully validated existence of '%s'",
                   self.outfiles[1])

        # Check if all the renames are listed in the outfile as
        # 'RENAME <old> <new>' entries
        for i in range(1, 11):
            pattern = 'RENAME file%s renamed-file%s' % (i, i)
            ret = check_if_pattern_in_file(self.mnode, pattern,
                                           self.outfiles[1])
            self.assertEqual(ret, 0,
                             ("File 'renamed-file%s' not listed in %s" %
                              (i, self.outfiles[1])))
            # NOTE(review): log text says 'file%s' though the entry
            # verified is the RENAME record for renamed-file%s
            g.log.info("File 'file%s' listed in %s", i, self.outfiles[1])
# Example #10
    def test_file_rename_when_dest_hash_src_cached(self):
        """
        - Destination file should exist
        - Source file hashed sub volume(s1) and cached on another subvolume(s2)
        - Destination file should be hased to subvolume where source file is
          stored(s2)
        - Destination file hased on subvolume(s2) but should be cached on
          some other subvolume(s3) than this two subvolume
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be removed
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s2)
        src_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for destination file, which hashes
        # to another subvol (s2)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Rename the source file to the new file name
        src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, src_hashed)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(src_link_subvol._fqpath,
                                      str(new_hashed.newname))))

        # Find a subvol (s3) for dest file to linkto, other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count):
                subvol_new = brickdir
                break

        new_hashed2 = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Create a file in the subvol S3
        dest_subvol, count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))

        # Verify the subvol is not same as S1 and S2
        self.assertNotEqual(count, src_count,
                            ("The subvol found for destination is same as that"
                             " of the source file cached subvol"))
        self.assertNotEqual(count, new_hashed.subvol_count,
                            ("The subvol found for destination is same as that"
                             " of the source file hashed subvol"))

        # Find a file name that hashes to S2
        dest_hashed = find_specific_hashed(self.subvols,
                                           "/",
                                           src_subvol)
        self.assertIsNotNone(dest_hashed,
                             "could not find new hashed for dstfile")

        # Rename destination to hash to S2 and verify
        dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_file, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dest_file, dest)))

        # Rename Source File to Dest
        ret = move_file(self.mounts[0].client_system, src_hashed, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(src_hashed, dest)))

        # Verify Destination File is removed
        ret = self._verify_file_exists(new_hashed2.hashedbrickobject,
                                       str(new_hashed2.newname))
        self.assertFalse(ret, "The Destination file is still present in {}"
                         .format(dest_subvol._fqpath))

        # Verify Source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(ret, "The source link file is still present in {}"
                         .format(src_link_subvol._fqpath))

        # Verify Destination Link is removed
        ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
                                            str(dest_hashed.newname))
        self.assertFalse(ret, "The Dest link file is still present in {}"
                         .format(dest_hashed.hashedbrickobject._fqpath))
    def test_stack_overflow(self):
        """
        Description: Tests to check that there is no stack overflow
                     in readdirp with parallel-readdir enabled.
        Steps :
        1) Create a volume.
        2) Mount the volume using FUSE.
        3) Enable performance.parallel-readdir and
           performance.readdir-ahead on the volume.
        4) Create 10000 files on the mount point.
        5) Add-brick to the volume.
        6) Perform fix-layout on the volume (not rebalance).
        7) From client node, rename all the files, this will result in creation
           of linkto files on the newly added brick.
        8) Do ls -l (lookup) on the mount-point.
        """
        # pylint: disable=too-many-statements
        # Enable performance.parallel-readdir and
        # performance.readdir-ahead on the volume
        options = {"performance.parallel-readdir": "enable",
                   "performance.readdir-ahead": "enable"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to set volume options")
        g.log.info("Successfully set volume options")

        # Creating 10000 files on volume root. Relies on bash brace
        # expansion on the client to generate file1_0 .. file10000_0.
        m_point = self.mounts[0].mountpoint
        command = 'touch ' + m_point + '/file{1..10000}_0'
        ret, _, _ = g.run(self.clients[0], command)
        self.assertEqual(ret, 0, "File creation failed on %s"
                         % m_point)
        g.log.info("Files successfully created on the mount point")

        # Add bricks to the volume so the renames below create linkto
        # files on the newly added bricks
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to expand the volume %s",
                              self.volname))
        g.log.info("Expanding volume is successful on "
                   "volume %s", self.volname)

        # Perform fix-layout (layout only, no data migration) on the volume
        ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
        self.assertEqual(ret, 0, 'Failed to start rebalance')
        g.log.info('Rebalance is started')

        # Wait for fix-layout to complete
        ret = wait_for_fix_layout_to_complete(self.mnode, self.volname,
                                              timeout=3000)
        self.assertTrue(ret, ("Fix-layout failed on volume %s",
                              self.volname))
        g.log.info("Fix-layout is successful on "
                   "volume %s", self.volname)

        # Rename all 10000 files from the client node; range is half-open,
        # so the upper bound must be 10001 to cover file10000_0 as well
        for i in range(1, 10001):
            ret = move_file(self.clients[0],
                            '{}/file{}_0'.format(m_point, i),
                            '{}/file{}_1'.format(m_point, i))
            self.assertTrue(ret, "Failed to rename files")
        g.log.info("Files renamed successfully")

        # Perform recursive lookup from the mount-point; this is the step
        # that would trigger the readdirp stack overflow if present
        cmd = "ls -lR " + m_point
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to lookup")
        g.log.info("Lookup successful")
    def test_subdir_when_renamed(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        - Mount the volume
        - Create 1 subdir on mountpoint "d1"
        - Auth allow - Client1(d1), Client2(full volume)
        - Mount the subdir "d1" on client1 and the volume on client2
        - Start IO's on all the mount points
        - Perform rename operation from client2: rename the subdir
          "d1" to "d1_renamed"
        - Unmount volume and subdir from clients
        - Try mounting "d1" on client1. This should fail.
        - Try mounting "d1_renamed" on client1. This should fail.
        - Again set authentication: Auth allow -
          Client1(d1_renamed), Client2(full volume)
        - Mount "d1_renamed" on client1 and the volume on client2
        """

        # Create directory d1 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'd1' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # Unmount the volume before changing auth.allow settings
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectoy "d1" to access by client1
        # and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1 to access'
            'by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1': [self.clients[0]],
            '/': [self.clients[1]]
        })
        # NOTE(review): the failure message interpolates self.volume (a
        # base-class attribute, likely a dict); self.volname looks intended
        # here — confirm against the base class.
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volume)

        # Creating mount list for mounting subdir mount and volume;
        # deep copies so the originals in self.mounts stay untouched
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/d1" % self.volname
        self.subdir_mounts[0].client_system = self.clients[0]
        self.subdir_mounts[1].client_system = self.clients[1]

        # Mount Subdirectory d1 on client 1 and volume on client 2.
        # NOTE: 'mountpoint' keeps the value from the LAST iteration
        # (client2's volume mount) and is reused further below.
        for mount_obj in self.subdir_mounts:
            mountpoint = mount_obj.mountpoint
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Start IO on all the mounts; dirname-start-num is staggered by 10
        # per mount so the two workloads create disjoint directory names
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Rename the subdir "d1" to "d1_renamed" from client2
        # ('mountpoint' here is client2's volume mountpoint from the loop)
        source_fpath = "%s/d1" % mountpoint
        dest_fpath = "%s/d1_renamed" % mountpoint
        ret = move_file(self.clients[1], source_fpath, dest_fpath)
        self.assertTrue(ret, "Rename subdirectory failed")
        g.log.info('Renamed directory %s to %s', source_fpath, dest_fpath)

        # Unmount volume and subdir from clients before the negative
        # mount attempts below
        ret = self.unmount_volume(self.subdir_mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes Unmounted successfully")

        # Try mounting subdir "d1" on client1; auth still allows only /d1,
        # but the directory no longer exists, so the mount must not succeed
        _, _, _ = mount_volume("%s/d1" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        # is_mounted presumably returns a falsy value when not mounted
        # (False == 0 satisfies this check) — TODO confirm the helper's
        # return convention
        ret = is_mounted(self.volname, mountpoint, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertEqual(
            ret, 0, "d1 mount should have failed.But d1 is"
            "successfully mounted on mount point: %s" % mountpoint)
        g.log.info("subdir %s/d1 is not mounted as expected %s", self.volname,
                   mountpoint)

        # Try mounting subdir "d1_renamed" on client1; it exists but auth
        # allow was never granted for it, so the mount must not succeed
        _, _, _ = mount_volume("%s/d1_renamed" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted("%s/d1_renamed" % self.volname, mountpoint,
                         self.mnode, self.clients[0], self.mount_type)
        self.assertEqual(
            ret, 0, "d1_renamed mount should have failed.But"
            "d1_renamed is successfully mounted on : %s" % mountpoint)
        g.log.info("subdir %s/d1_renamed is not mounted as expected %s",
                   self.volname, mountpoint)

        # Set authentication on the subdirectoy "d1_renamed" to access
        # by client1 and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1_renamed to'
            'access by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1_renamed': [self.clients[0]],
            '/': [self.clients[1]]
        })
        # NOTE(review): same self.volume vs self.volname question as above
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volume)

        # Overwriting the list of subdir mount, directory d1 to d1_renamed
        self.subdir_mounts[0].volname = "%s/d1_renamed" % self.volname

        # Mount Subdirectory d1_renamed on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)

        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Get stat of all the files/dirs created from both clients to
        # verify the data survived the rename and remount
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
    def test_file_rename_dest_exist_and_not_hash_src_srclink_subvol(self):
        """
        Case 8:
        - Destination file should exist
        - Source file is hashed sub volume(s1) and
          cached on another subvolume(s2)
        - Destination file should be hashed to some other subvolume(s3)
          (should not be same subvolumes mentioned in above condition)
             mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Source hashed file should be removed
        - Destination hashed file should be created on its hashed subvolume(s3)
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals
        # pylint: disable=unsubscriptable-object

        # Create source file and record its hashed/cached subvol index (s2)
        _, count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a name that hashes to a different subvol (s1); renaming to it
        # leaves a linkto file on s1 while the data stays cached on s2
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "could not find new hashed for dstfile")
        count2 = new_hashed.subvol_count
        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(src_link_subvol._fqpath,
                                                    str(new_hashed.newname))))

        # Find a subvol (s3) other than S1 and S2 by scanning the brick
        # objects and skipping the two indices already in use
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (count, count2):
                subvol_new = brickdir
                break

        new_hashed2 = find_specific_hashed(self.subvols, "/", subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Create destination file in a new subvol (s3)
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))

        # Verify the destination subvol is not S1
        self.assertNotEqual(
            count2, dest_count,
            ("The subvols for src :{} and dest : {} are same.".format(
                count2, dest_count)))
        # Verify the destination subvol is not S2
        self.assertNotEqual(
            count, dest_count,
            ("The subvols for src :{} and dest : {} are same.".format(
                count, dest_count)))

        # Rename the source file (under its post-rename name) onto the
        # destination file
        source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify destination file is removed
        # NOTE(review): sibling cases use assertFalse for "file removed"
        # checks (e.g. the s3 linkto check below uses assertTrue because the
        # linkto SHOULD exist). assertTrue here passes only if something
        # (possibly the newly created dest linkto) still exists at this
        # path — confirm the intended semantics. Also the message
        # interpolates new_hashed.newname although the file being checked
        # is new_hashed2.newname.
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed2.newname))
        self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
                              " : {}".format(str(new_hashed.newname),
                                             dest_hashed_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")

        # Check that the source link file on s1 is removed.
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The New hashed volume {} still have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))
        g.log.info("The source link file is removed")

        # Check Destination link file is created on its hashed sub-volume(s3)
        ret = self._verify_link_file_exists(dest_hashed_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(dest_hashed_subvol._fqpath,
                                                   str(new_hashed2.newname))))
        g.log.info("Destinaion link is created in desired subvol")
    def test_delete_file_in_migration(self):
        """
        Verify that a file picked for migration and then deleted is
        removed cleanly everywhere.

        * Create a big 10GB data file.
        * Rename it so that a linkto file is created (this guarantees the
          file gets picked for migration).
        * Add bricks to the volume and trigger rebalance with force.
        * While the file is under migration, delete it from the mount.
        * Confirm the file is gone from the mount point and from every
          back-end brick, and that no rebalance process crashed.
        """

        # pylint: disable=too-many-statements
        # pylint: disable=too-many-locals
        # pylint: disable=protected-access

        mount_dir = self.mounts[0].mountpoint

        # Source file on the mount
        src_file = mount_dir + '/file1'

        # Pick a new name for 'file1' that hashes to a different subvol,
        # so the rename below leaves a linkto file behind
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        hashed = find_new_hashed(subvols, "/", "file1")
        new_name = str(hashed.newname)
        new_host = str(hashed.hashedbrickobject._host)
        # _fqpath carries a trailing "/."; drop those two characters
        new_name_path = str(hashed.hashedbrickobject._fqpath)[:-2]

        # Rename target on the mount
        dst_file = '%s/%s' % (mount_dir, new_name)
        # Create the 10GB source file
        cmd = "dd if=/dev/urandom of=%s bs=1024K count=10000" % src_file
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "File %s creation failed" % src_file)

        # Rename so the file hashes to another subvol and a linkto file
        # is formed on the new hashed brick
        ret = move_file(self.clients[0], src_file, dst_file)
        self.assertTrue(ret, "Rename failed")
        g.log.info('Renamed file %s to %s', src_file, dst_file)

        # Confirm the renamed file really is a linkto file on the brick
        ret = is_linkto_file(new_host, '%s/%s' % (new_name_path, new_name))
        self.assertTrue(ret, "File is not a linkto file")
        g.log.info("File is linkto file")

        # Expand the volume by adding bricks
        ret, _, _ = add_brick(self.mnode, self.volname,
                              self.add_brick_list, force=True)
        self.assertEqual(ret, 0,
                         "Volume %s: Add-brick failed" % self.volname)
        g.log.info("Volume %s: add-brick successful", self.volname)

        # Log Volume Info and Status after expanding the volume
        log_volume_info_and_status(self.mnode, self.volname)

        # Kick off rebalance so migration starts
        ret, _, _ = rebalance_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0,
                         "Volume %s: Failed to start rebalance"
                         % self.volname)
        g.log.info("Volume %s : Rebalance started ", self.volname)

        # With rebalance running, delete the file that is being migrated
        status = get_rebalance_status(
            self.mnode, self.volname)['aggregate']['statusStr']
        self.assertEqual(status, 'in progress', "Rebalance is not running")
        ret, _, _ = g.run(self.clients[0], " rm -rf " + dst_file)
        self.assertEqual(ret, 0, "Cannot delete file %s" % dst_file)
        g.log.info("File is deleted")

        # The file must no longer be visible on the mount point
        # (ls exits with 2 when the path does not exist)
        ret, _, _ = g.run(self.clients[0], "ls -l " + dst_file)
        self.assertEqual(ret, 2, "Failed to delete file %s" % dst_file)

        # ...nor present on any back-end brick
        for brick in get_all_bricks(self.mnode, self.volname):
            node, brick_path = brick.split(':')
            ret, _, _ = g.run(node, "ls -l %s/%s" % (brick_path, new_name))
            self.assertEqual(ret, 2,
                             "File is still present on back-end brick: %s"
                             % brick_path)
            g.log.info("File is deleted from back-end brick: %s", brick_path)

        # The rebalance daemons should not be lingering on any server
        # (pgrep exits 1 when no process matches)
        for server in self.servers:
            ret, _, _ = g.run(server, "pgrep rebalance")
            self.assertEqual(ret, 1,
                             "Rebalance process is still running on server %s"
                             % server)
            g.log.info("Rebalance process is not running")
    def test_file_rename_when_dest_cache_to_src_subvol(self):
        """
        - Destination file should exist
        - Source file is stored on its hashed subvolume (s1)
        - Destination file should be hashed to some other subvolume (s2)
        - Destination file hashed on subvolume (s2) but cached on the
          subvolume (s1) where source file is present
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be there on hashed subvolume and
          should link to new destination file
        """
        # pylint: disable=protected-access

        # Create source file and get its hashed subvol (s1)
        source_hashed_subvol, src_count, _ = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a name for the dest file that hashes/caches to S1
        dest_subvol = find_specific_hashed(self.subvols, "/",
                                           source_hashed_subvol)
        # Guard the search result (sibling cases do the same check); without
        # it the .newname access below raises AttributeError on failure
        self.assertIsNotNone(dest_subvol,
                             "could not find a name hashed to the source "
                             "subvol for the destination file")
        dest_name = str(dest_subvol.newname)

        # Create destination file in subvol S1
        _, dest_count, _ = self._create_file_and_get_hashed_subvol(dest_name)

        # Verify it landed on subvol (s1)
        self.assertEqual(src_count, dest_count,
                         ("The newly created file falls under subvol {} "
                          "rather than {}".format(dest_count, src_count)))

        # Find a rename target for the dest file that hashes to some other
        # subvol S2
        dest_hashed_subvol = find_new_hashed(self.subvols, "/", dest_name)
        self.assertIsNotNone(dest_hashed_subvol,
                             "could not find new hashed for dstfile")

        # Rename/Move the dest file; data stays cached on S1, a linkto
        # file is created on S2
        dest_file = "{}/{}".format(self.mount_point,
                                   dest_hashed_subvol.newname)
        src_file = "{}/{}".format(self.mount_point, dest_name)
        ret = move_file(self.mounts[0].client_system, src_file, dest_file)
        self.assertTrue(
            ret, "Failed to move files {} and {}".format(src_file, dest_file))

        # Verify the Dest link file is stored on hashed sub volume(s2)
        dest_link_subvol = dest_hashed_subvol.hashedbrickobject
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(dest_hashed_subvol.newname))
        self.assertTrue(
            ret,
            ("The hashed subvol {} doesn't have the "
             "expected linkto file: {}".format(
                 dest_link_subvol._fqpath, str(dest_hashed_subvol.newname))))

        # Rename Source to Dest
        src = "{}/{}".format(self.mount_point, "test_source_file")
        dest_file = "{}/{}".format(self.mount_point,
                                   dest_hashed_subvol.newname)
        ret = move_file(self.mounts[0].client_system, src, dest_file)
        self.assertTrue(
            ret, "Failed to move files {} and {}".format(src, dest_file))

        # Verify the original destination data file is removed from S1
        ret = self._verify_file_exists(dest_subvol.hashedbrickobject,
                                       dest_name)
        self.assertFalse(ret, ("Destination file : {} is not removed in subvol"
                               " : {}".format(str(dest_hashed_subvol.newname),
                                              dest_link_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")

        # Verify the Destination link is still present on S2
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(dest_hashed_subvol.newname))
        self.assertTrue(
            ret,
            ("The hashed subvol {} still have the "
             "expected linkto file: {}".format(
                 dest_link_subvol._fqpath, str(dest_hashed_subvol.newname))))

        g.log.info("The Destination link file is present as expected")

        # Verify the dest link file points to the new destination file
        # (assumes _fqpath ends with a path separator — TODO confirm)
        file_path = dest_link_subvol._fqpath + str(dest_hashed_subvol.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_link_subvol._host, file_path,
            str(dest_hashed_subvol.newname)))
        self.assertTrue(
            ret, "The dest link file not pointing towards "
            "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")
    def test_file_rename_when_dest_hash_to_src_subvol(self):
        """
        - Destination file should exist
        - Source file is stored on its hashed subvolume (s1)
        - Destination file should be hashed to the same subvolume (s1)
          where the source file is
        - Destination file hashed on subvolume (s1) but cached on
          subvolume (s2)
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be removed
        """
        # pylint: disable=protected-access

        # Create source file and get its hashed subvol (s1)
        source_hashed_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a file name that hashes to another subvol (s2)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(
            new_hashed,
            ("could'nt find new hashed for {}".format(source_file)))

        # Create destination file in subvol S2
        _, dest_count, dest_file = (self._create_file_and_get_hashed_subvol(
            str(new_hashed.newname)))

        # Find a rename target for the dest file that hashes back to S1
        new_hashed2 = find_specific_hashed(self.subvols, "/",
                                           source_hashed_subvol)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Verify the chosen name hashes to S1 itself
        self.assertEqual(
            new_hashed2.subvol_count, src_count,
            "The destination file is not stored to desired "
            "subvol :{}".format(dest_count))

        # Rename/Move the dest file; data stays cached on S2, a linkto
        # file is created on S1
        dest_file2 = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
        ret = move_file(self.mounts[0].client_system, dest_file, dest_file2)
        # Fixed: the failure message previously reported source_file and
        # dest_file although this move is dest_file -> dest_file2
        self.assertTrue(
            ret,
            "Failed to move files {} and {}".format(dest_file, dest_file2))

        # Verify the Dest link file is stored on hashed sub volume(s1)
        dest_link_subvol = new_hashed2.hashedbrickobject
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(dest_link_subvol._fqpath,
                                                    str(new_hashed2.newname))))

        # Rename Source to Dest
        src = "{}/{}".format(self.mount_point, "test_source_file")
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
        ret = move_file(self.mounts[0].client_system, src, dest_file)
        self.assertTrue(
            ret, "Failed to move files {} and {}".format(src, dest_file))

        # Verify the destination data file is removed from S2
        ret = self._verify_file_exists(new_hashed.hashedbrickobject,
                                       str(new_hashed.newname))
        self.assertFalse(
            ret, ("Destination file : {} is not removed in subvol"
                  " : {}".format(str(new_hashed.newname),
                                 new_hashed.hashedbrickobject._fqpath)))
        g.log.info("The destination file is removed as expected")

        # Verify the Destination link on S1 is removed
        ret = self._verify_link_file_exists(new_hashed2.hashedbrickobject,
                                            str(new_hashed2.newname))
        self.assertFalse(ret, ("The hashed subvol {} still have the "
                               "expected linkto file: {}".format(
                                   new_hashed2.hashedbrickobject._fqpath,
                                   str(new_hashed2.newname))))

        g.log.info("The Destination link file is removed as expected")
    def test_file_rename_when_source_and_dest_hash_same_subvol(self):
        """
        - Destination file should exist
        - Source file is hashed sub volume(s1) and cached on another
          subvolume(s2)
        - Destination file should be hashed to same subvolume(s1) where
          source file is hashed
        - Destination hashed on subvolume(s1) but should be cached on
          subvolume(s2) where source file is stored
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be there on hashed subvolume and
          should link to new destination file
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file; the subvol it lands on acts as S2 (cached)
        source_hashed_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a name for the file such that it hashes to a new subvol (S1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(
            new_hashed,
            ("could'nt find new hashed for {}".format(source_file)))

        # Verify the subvols are not same for source and destination files
        self.assertNotEqual(src_count, new_hashed.subvol_count,
                            "The subvols for src and dest are same.")

        # Rename/Move the file: data stays cached on S2, a linkto file is
        # created on the new hashed subvol S1
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            "Failed to move files {} and {}".format(source_file, dest_file))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(src_link_subvol._fqpath,
                                                    str(new_hashed.newname))))

        # Get a file name that hashes (and hence caches) to S2 for destination
        new_hashed2 = find_specific_hashed(self.subvols, "/",
                                           source_hashed_subvol)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Create destination file in subvol S2
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))

        # Verify the subvol is S2 itself
        self.assertEqual(
            dest_count, src_count,
            "The destination file is not stored to desired "
            "subvol :{}".format(dest_count))

        # Find a name for dest that hashes to S1, so renaming to it creates
        # a dest linkto file on S1
        dest_hashed = find_specific_hashed(self.subvols, "/", src_link_subvol,
                                           new_hashed.newname)
        # find_specific_hashed returns None on failure; fail with a clear
        # message instead of raising AttributeError on the next access
        self.assertIsNotNone(dest_hashed,
                             "could not find a name hashing to subvol S1")
        # Verify the subvol is S1
        self.assertEqual(
            dest_hashed.subvol_count, new_hashed.subvol_count,
            "The destination file is not stored to desired "
            "subvol :{}, instead to subvol : {}".format(
                dest_hashed.subvol_count, new_hashed))

        # Rename the dest file to the new file name
        dest_file_2 = "{}/{}".format(self.mount_point,
                                     str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_file, dest_file_2)
        self.assertTrue(
            ret,
            "Failed to move files {} and {}".format(dest_file, dest_file_2))

        # Rename source to destination
        src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        dest_file = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, src, dest_file)
        self.assertTrue(
            ret, "Failed to move files {} and {}".format(src, dest_file))

        # Verify destination file is removed
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed2.newname))
        self.assertFalse(ret, ("Destination file : {} is not removed in subvol"
                               " : {}".format(str(new_hashed2.newname),
                                              dest_hashed_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")

        # Verify the source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The hashed subvol {} still have the "
                  "expected linkto file: {}".format(src_link_subvol._fqpath,
                                                    str(new_hashed.newname))))

        g.log.info("The source link file is removed as expected")

        # Verify the Destination link is on hashed subvolume (S1)
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(dest_hashed.newname))
        self.assertTrue(
            ret, ("The hashed subvol {} doesn't have the "
                  "expected linkto file: {}".format(src_link_subvol._fqpath,
                                                    str(dest_hashed.newname))))

        # Verify the dest link file points to new destination file
        file_path = src_link_subvol._fqpath + str(dest_hashed.newname)
        ret = (self._verify_file_links_to_specified_destination(
            src_link_subvol._host, file_path, str(dest_hashed.newname)))
        self.assertTrue(
            ret, "The dest link file not pointing towards "
            "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")
# Example #18
    def test_file_rename_when_dest_hash_src_hashed_but_cache_diff(self):
        """
        - Destination file should exist
        - Source file is stored on hashed subvolume it self
        - Destination file should be hashed to some other subvolume(s2)
        - Destination file hashed on subvolume(s2) but cached on some other
          subvolume(s3)(neither s1 nor s2)
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination link file should be there on hashed subvolume and
          should link to new destination file
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s1)
        _, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for destination to hash to some subvol S3
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Create Dest file in S3
        dest_cached, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))

        # Verify S1 and S3 are not same
        self.assertNotEqual(src_count, dest_count,
                            ("The destination file is cached to the source "
                             "cached subvol"))

        # Find new name for dest file, that it hashes to some other subvol S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, dest_count):
                subvol_new = brickdir
                break
        # Guard: the volume must have a third subvol besides S1 and S3
        self.assertIsNotNone(subvol_new,
                             "could not find a subvol other than the source "
                             "and destination cached subvols")

        dest_hashed = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        # find_specific_hashed returns None on failure; fail with a clear
        # message instead of raising AttributeError on the next access
        self.assertIsNotNone(dest_hashed,
                             "could not find a name hashing to the desired "
                             "subvol")
        # Move dest to new name
        dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
        ret = move_file(self.mounts[0].client_system, dest_file, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dest_file, dest)))

        # Move Source file to Dest
        ret = move_file(self.mounts[0].client_system, source_file, dest)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, dest)))

        # Verify Destination File is removed
        ret = self._verify_file_exists(dest_cached,
                                       str(new_hashed.newname))
        self.assertFalse(ret, "The Source file is still present in {}"
                         .format(dest_cached._fqpath))

        # Verify Destination Link is present and points to new dest file
        ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
                                            str(dest_hashed.newname))
        self.assertTrue(ret, "The Dest link file is not present in {}"
                        .format(dest_hashed.hashedbrickobject._fqpath))

        file_path = dest_hashed.hashedbrickobject._fqpath + str(
            dest_hashed.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_hashed.hashedbrickobject._host, file_path,
            str(dest_hashed.newname)))
        self.assertTrue(ret, "The dest link file not pointing towards "
                             "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")
    def test_file_rename_dest_exist_and_hash_to_srclink_subvol(self):
        """
        Case 10:
        - Destination file should exist
        - Source file is hashed sub volume(s1) and
          cached on another subvolume(s2)
        - Destination file should be hashed to same subvolume(s1) where source
          file is hashed.
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file(cached) should be renamed to destination file
        - Source file(hashed) should be removed.
        - Destination hashed file should be created on its
          hashed subvolume(s1)
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object

        # Create the source file; the subvol it lands on acts as s2 (cached)
        _, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a name hashing to a different subvol; renaming to it creates
        # a linkto file in that subvol - (s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed, ("could not find new hashed subvol "
                                          "for {}".format(source_file)))

        self.assertNotEqual(src_count, new_hashed.subvol_count,
                            "New file should hash to different sub-volume")

        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(
            ret,
            ("Failed to move file {} and {}".format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))

        # Get a file name for dest to hash to the subvol s1
        new_hashed2 = find_specific_hashed(self.subvols, "/", src_link_subvol,
                                           new_hashed.newname)
        self.assertIsNotNone(new_hashed2,
                             ("Couldn't find a name hashed to the"
                              " given subvol {}".format(src_link_subvol)))
        # Create destination file; its name hashes to s1, so it is cached
        # on s1 as well
        dest_hashed_subvol, dest_count, dest_file = (
            self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))

        # Verify the subvol is same as S1
        self.assertEqual(new_hashed.subvol_count, dest_count,
                         "The subvols for src and dest are not same.")

        # Move the source file to the new file name
        source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(ret, "Failed to move file")

        # Check the destination path on s1 after the rename.
        # NOTE(review): the surrounding text says "removed" yet this asserts
        # the path still EXISTS; after the rename the destination's linkto
        # file is created at this very path on s1 (verified again below), so
        # existence is plausible -- but the failure message reads like an
        # assertFalse was intended, and it formats new_hashed.newname instead
        # of new_hashed2.newname. Confirm the intended semantics.
        ret = self._verify_file_exists(dest_hashed_subvol,
                                       str(new_hashed2.newname))
        self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
                              " : {}".format(str(new_hashed.newname),
                                             dest_hashed_subvol._fqpath)))
        g.log.info("The destination file is removed as expected")

        # Check that the source link file is removed.
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(
            ret, ("The hashed volume {} still have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed.newname))))
        g.log.info("The source link file is removed")

        # Check Destination link file is created on its hashed sub-volume(s1)
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(
            ret, ("The New hashed volume {} doesn't have the "
                  "expected linkto file {}".format(src_link_subvol._fqpath,
                                                   str(new_hashed2.newname))))
        g.log.info("Destinaion link is created in desired subvol")
    def test_enabling_gluster_debug_mode(self):

        # pylint: disable=too-many-statements
        """
        Testcase:
        1. Stop glusterd.
        2. Change log level to DEBUG in
           /usr/local/lib/systemd/system/glusterd.service.
        3. Remove glusterd log
        4. Start glusterd
        5. Issue some gluster commands
        6. Check for debug messages in glusterd log
        """
        # Stop glusterd
        ret = stop_glusterd(self.mnode)
        self.assertTrue(ret, "Failed to stop glusterd on %s" % self.mnode)
        g.log.info("Successfully stopped glusterd.")

        # Change log level in /usr/lib/systemd/system/glusterd.service
        # to DEBUG
        glusterd_file = "/usr/lib/systemd/system/glusterd.service"
        ret = find_and_replace_in_file(self.mnode, 'LOG_LEVEL=INFO',
                                       'LOG_LEVEL=DEBUG', glusterd_file)
        self.assertTrue(ret, "Unable to change Log_LEVEL to DEBUG.")

        # Archive old glusterd.log file.
        ret = move_file(self.mnode, '/var/log/glusterfs/glusterd.log',
                        '/var/log/glusterfs/old.log')
        self.assertTrue(ret, "Renaming the glusterd log is failed")
        g.log.info("Successfully renamed glusterd.log file.")

        # Daemon reloading as the unit file of the daemon changed
        ret = daemon_reload(self.mnode)
        self.assertTrue(ret, "Failed to reload the daemon")

        # Start glusterd
        ret = start_glusterd(self.mnode)
        self.assertTrue(ret, "Failed to start glusterd on %s" % self.mnode)
        g.log.info('Successfully started glusterd.')

        # Wait until glusterd is up: is_glusterd_running returns 0 when the
        # daemon is running, non-zero otherwise (same convention as the
        # tearDown wait loop), so break on a zero return value.
        count = 0
        while count < 60:
            ret = is_glusterd_running(self.mnode)
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
        g.log.info('glusterd is running after changing log_level to debug.')

        # Instead of executing commands in loop, if glusterd is restarted in
        # one of the nodes in the cluster the handshake messages
        # will be in debug mode.
        ret = restart_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to restart glusterd on %s"
                        % self.servers[1])

        # Wait again for glusterd to come back after the restart
        count = 0
        while count < 60:
            ret = is_glusterd_running(self.mnode)
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
        g.log.info('glusterd is running after changing log_level to debug.')

        # Check glusterd logs for debug messages (' D ' marks DEBUG entries)
        glusterd_log_file = "/var/log/glusterfs/glusterd.log"
        ret = check_if_pattern_in_file(self.mnode, ' D ', glusterd_log_file)
        self.assertEqual(ret, 0, "Debug messages are not present in log.")
        g.log.info("Debug messages are present in log.")
# Example #21
    def test_access_file_with_stale_linkto_xattr(self):
        """
        Description: Checks if the files are accessible as non-root user if
                     the files have stale linkto xattr.
        Steps:
        1) Create a volume and start it.
        2) Mount the volume on client node using FUSE.
        3) Create a file.
        4) Enable performance.parallel-readdir and
           performance.readdir-ahead on the volume.
        5) Rename the file in order to create
           a linkto file.
        6) Force the linkto xattr values to become stale by changing the dht
           subvols in the graph
        7) Login as an non-root user and access the file.
        """
        # pylint: disable=protected-access

        # Set permissions on the mount-point
        m_point = self.mounts[0].mountpoint
        ret = set_file_permissions(self.clients[0], m_point, "-R 777")
        self.assertTrue(ret, "Failed to set file permissions")
        g.log.info("Successfully set file permissions on mount-point")

        # Creating a file on the mount-point
        cmd = 'dd if=/dev/urandom of={}/FILE-1 count=1 bs=16k'.format(m_point)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to create file")

        # Enable performance.parallel-readdir and
        # performance.readdir-ahead on the volume
        options = {
            "performance.parallel-readdir": "enable",
            "performance.readdir-ahead": "enable"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to set volume options")
        g.log.info("Successfully set volume options")

        # Finding a file name such that renaming source file to it will form a
        # linkto file
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        newhash = find_new_hashed(subvols, "/", "FILE-1")
        new_name = str(newhash.newname)
        new_host = str(newhash.hashedbrickobject._host)
        new_name_path = str(newhash.hashedbrickobject._fqpath)[:-1]

        # Move file such that it hashes to some other subvol and forms linkto
        # file
        ret = move_file(self.clients[0], "{}/FILE-1".format(m_point),
                        "{}/{}".format(m_point, new_name))
        self.assertTrue(ret, "Rename failed")
        g.log.info('Renamed file %s to %s', "{}/FILE-1".format(m_point),
                   "{}/{}".format(m_point, new_name))

        # Check if "dst_file" is linkto file
        ret = is_linkto_file(new_host, '{}{}'.format(new_name_path, new_name))
        self.assertTrue(ret, "File is not a linkto file")
        g.log.info("File is linkto file")

        # Force the linkto xattr values to become stale by changing the dht
        # subvols in the graph; for that:
        # disable performance.parallel-readdir and
        # performance.readdir-ahead on the volume
        options = {
            "performance.parallel-readdir": "disable",
            "performance.readdir-ahead": "disable"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to disable volume options")
        g.log.info("Successfully disabled volume options")

        # Access the file as non-root user
        # NOTE(review): the username literal below appears redacted/mangled
        # ("******"); it must be a real non-root user present on the client.
        # Confirm against the original test source.
        cmd = "ls -lR {}".format(m_point)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd, user="******")
        self.assertEqual(ret, 0, "Lookup failed ")
        g.log.info("Lookup successful")
    def tearDown(self):
        """Revert the DEBUG log level back to INFO, restore the glusterd log
        files, restart glusterd and wait for the cluster to become healthy."""

        # Stop glusterd
        ret = stop_glusterd(self.mnode)
        if not ret:
            raise ExecutionError("Failed to stop glusterd on %s" % self.mnode)
        g.log.info("Successfully stopped glusterd.")

        # Reverting log level in /usr/lib/systemd/system/glusterd.service
        # to INFO
        glusterd_file = "/usr/lib/systemd/system/glusterd.service"
        ret = find_and_replace_in_file(self.mnode, 'LOG_LEVEL=DEBUG',
                                       'LOG_LEVEL=INFO', glusterd_file)
        if not ret:
            raise ExecutionError("Failed to revert LOG_LEVEL to INFO in the "
                                 "glusterd unit file")
        g.log.info("Changes to glusterd.services reverted.")

        # Archiving the glusterd log file of test case.
        ret = move_file(self.mnode, '/var/log/glusterfs/glusterd.log',
                        '/var/log/glusterfs/EnableDebugMode-glusterd.log')
        if not ret:
            raise ExecutionError("Archiving present log file failed.")
        g.log.info("Archiving present log file successful.")

        # Reverting back to old glusterd log file.
        ret = move_file(self.mnode, '/var/log/glusterfs/old.log',
                        '/var/log/glusterfs/glusterd.log')
        if not ret:
            raise ExecutionError("Reverting glusterd log failed.")
        g.log.info("Reverting of glusterd log successful.")

        # Daemon should be reloaded as unit file is changed
        ret = daemon_reload(self.mnode)
        if not ret:
            raise ExecutionError("Unable to reload the daemon")
        g.log.info("Daemon reloaded successfully")

        # Restart glusterd
        ret = start_glusterd(self.mnode)
        if not ret:
            raise ExecutionError("Failed to start glusterd on %s" % self.mnode)
        g.log.info("Successfully restarted glusterd.")

        # Wait until glusterd is running (is_glusterd_running returns 0
        # when the daemon is up).
        count = 0
        while count < 60:
            ret = is_glusterd_running(self.mnode)
            if not ret:
                break
            sleep(2)
            count += 1
        if ret:
            raise ExecutionError("glusterd is not running on %s" % self.mnode)
        g.log.info("glusterd running with log level INFO.")

        # Wait until all peers are back in connected state.
        count = 0
        while count < 60:
            ret = self.validate_peers_are_connected()
            if ret:
                break
            sleep(3)
            count += 1
        if not ret:
            raise ExecutionError("Peers are not in connected state.")
        g.log.info("Peers are in connected state.")

        self.get_super_method(self, 'tearDown')()
# Example #23
    def test_rename_directory_no_destination_folder(self):
        """Test rename directory with no destination folder"""
        # Path templates; 'root' is the mount point for client-side checks
        # and the brick directory for brick-side checks.
        dirs = {
            'initial': '{root}/folder_{client_index}',
            'new_folder': '{root}/folder_renamed{client_index}'
        }

        for mount_index, mount_obj in enumerate(self.mounts):
            client_host = mount_obj.client_system
            mountpoint = mount_obj.mountpoint
            initial_folder = dirs['initial'].format(
                root=mount_obj.mountpoint,
                client_index=mount_index
            )

            # Layout must be complete before exercising renames
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=LAYOUT_IS_COMPLETE,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "Expected - Layout is complete")
            g.log.info('Layout is complete')

            # Create source folder on mount point
            self.assertTrue(mkdir(client_host, initial_folder),
                            'Failed creating source directory')
            self.assertTrue(file_exists(client_host, initial_folder))
            g.log.info('Created source directory %s on mount point %s',
                       initial_folder, mountpoint)

            # Create files and directories
            ret = self.create_files(client_host, initial_folder, self.files,
                                    content='Textual content')

            self.assertTrue(ret, 'Unable to create files on mount point')
            g.log.info('Files and directories are created')

            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS)
            self.assertTrue(ret, "Expected - Files and dirs are stored "
                            "on hashed bricks")
            g.log.info('Files and dirs are stored on hashed bricks')

            new_folder_name = dirs['new_folder'].format(
                root=mountpoint,
                client_index=mount_index
            )
            # Check if destination dir does not exist
            self.assertFalse(file_exists(client_host, new_folder_name),
                             'Expected New folder name should not exists')
            # Rename source folder
            ret = move_file(client_host, initial_folder,
                            new_folder_name)
            self.assertTrue(ret, "Rename directory failed")
            g.log.info('Renamed directory %s to %s', initial_folder,
                       new_folder_name)

            # Old dir does not exists and destination is presented
            self.assertFalse(file_exists(client_host, initial_folder),
                             '%s should be not listed' % initial_folder)
            g.log.info('The old directory %s does not exists on mount point',
                       initial_folder)
            self.assertTrue(file_exists(client_host, new_folder_name),
                            'Destination dir does not exists %s' %
                            new_folder_name)
            g.log.info('The new folder is presented %s', new_folder_name)

            # Check bricks for source and destination directories
            for brick_item in get_all_bricks(self.mnode, self.volname):
                brick_host, brick_dir = brick_item.split(':')

                initial_folder = dirs['initial'].format(
                    root=brick_dir,
                    client_index=mount_index
                )
                new_folder_name = dirs['new_folder'].format(
                    root=brick_dir,
                    client_index=mount_index
                )

                self.assertFalse(file_exists(brick_host, initial_folder),
                                 "Expected folder %s to be not presented" %
                                 initial_folder)
                self.assertTrue(file_exists(brick_host, new_folder_name),
                                'Expected folder %s to be presented' %
                                new_folder_name)

                g.log.info('The old directory %s does not exists and directory'
                           ' %s is presented', initial_folder, new_folder_name)
        g.log.info('Rename directory when destination directory '
                   'does not exists is successful')
# Example #24
    def test_file_rename_when_dest_doesnt_hash_src_cached_or_hashed(self):
        """
        - Destination file should exist
        - Source file is hashed on sub volume(s1) and cached on
          another subvolume(s2)
        - Destination file should be hashed to subvolume(s3) other
          than above two subvolumes
        - Destination file hashed on subvolume(s3) but destination file
          should be cached on same subvolume(s2) where source file is stored
            mv <source_file> <destination_file>
        - Destination file is removed.
        - Source file should be renamed as destination file
        - Destination file hashed on subvolume and should link
          to new destination file
        - source link file should be removed
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals

        # Create source file and Get hashed subvol (s2)
        src_subvol, src_count, source_file = (
            self._create_file_and_get_hashed_subvol("test_source_file"))

        # Find a new file name for destination file, which hashes
        # to another subvol (s1)
        new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
        self.assertIsNotNone(new_hashed,
                             "couldn't find new hashed for destination file")

        # Rename the source file to the new file name
        dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, source_file, dest_file)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(source_file, dest_file)))

        # Verify the Source link file is stored on hashed sub volume(s1)
        src_link_subvol = new_hashed.hashedbrickobject
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(src_link_subvol._fqpath,
                                      str(new_hashed.newname))))

        # Identify a file name for dest to get stored in S2
        dest_cached_subvol = find_specific_hashed(self.subvols,
                                                  "/",
                                                  src_subvol)
        # find_specific_hashed returns None on failure; fail with a clear
        # message instead of raising AttributeError on the next access
        self.assertIsNotNone(dest_cached_subvol,
                             "could not find a name hashing to the source "
                             "cached subvol")
        # Create the file with identified name
        _, _, dst_file = (
            self._create_file_and_get_hashed_subvol(
                str(dest_cached_subvol.newname)))
        # Verify its in S2 itself
        self.assertEqual(dest_cached_subvol.subvol_count, src_count,
                         ("The subvol found for destination is not same as "
                          "that of the source file cached subvol"))

        # Find a subvol (s3) for dest file to linkto, other than S1 and S2
        brickobject = create_brickobjectlist(self.subvols, "/")
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        br_count = -1
        subvol_new = None
        for brickdir in brickobject:
            br_count += 1
            if br_count not in (src_count, new_hashed.subvol_count):
                subvol_new = brickdir
                break
        # Guard: the volume must have a third subvol besides S1 and S2
        self.assertIsNotNone(subvol_new,
                             "could not find a subvol other than the source "
                             "hashed and cached subvols")

        new_hashed2 = find_specific_hashed(self.subvols,
                                           "/",
                                           subvol_new)
        self.assertIsNotNone(new_hashed2,
                             "could not find new hashed for dstfile")

        # Verify the chosen subvol is neither the source cached subvol (s2)
        # nor the source hashed subvol (s1)
        self.assertNotEqual(new_hashed2.subvol_count, src_count,
                            ("The subvol found for destination is same as that"
                             " of the source file cached subvol"))
        self.assertNotEqual(new_hashed2.subvol_count, new_hashed.subvol_count,
                            ("The subvol found for destination is same as that"
                             " of the source file hashed subvol"))

        # Rename the dest file to the new file name
        dst_file_ln = "{}/{}".format(self.mount_point,
                                     str(new_hashed2.newname))
        ret = move_file(self.mounts[0].client_system, dst_file, dst_file_ln)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(dst_file, dst_file_ln)))

        # Verify the Dest link file is stored on hashed sub volume(s3)
        dest_link_subvol = new_hashed2.hashedbrickobject
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(dest_link_subvol._fqpath,
                                      str(new_hashed2.newname))))

        # Move/Rename Source File to Dest
        src_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
        ret = move_file(self.mounts[0].client_system, src_file, dst_file)
        self.assertTrue(ret, ("Failed to move file {} and {}"
                              .format(src_file, dst_file)))

        # Verify Source file is removed
        ret = self._verify_file_exists(src_subvol, "test_source_file")
        self.assertFalse(ret, "The source file is still present in {}"
                         .format(src_subvol._fqpath))

        # Verify Source link is removed
        ret = self._verify_link_file_exists(src_link_subvol,
                                            str(new_hashed.newname))
        self.assertFalse(ret, "The source link file is still present in {}"
                         .format(src_link_subvol._fqpath))

        # Verify the Destination link is on hashed subvolume
        ret = self._verify_link_file_exists(dest_link_subvol,
                                            str(new_hashed2.newname))
        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
                              "expected linkto file: {}"
                              .format(dest_link_subvol._fqpath,
                                      str(new_hashed2.newname))))

        # Verify the dest link file points to new destination file
        file_path = dest_link_subvol._fqpath + str(new_hashed2.newname)
        ret = (self._verify_file_links_to_specified_destination(
            dest_link_subvol._host, file_path,
            str(dest_cached_subvol.newname)))
        self.assertTrue(ret, "The dest link file not pointing towards "
                             "the desired file")
        g.log.info("The Destination link file is pointing to new file"
                   " as expected")