def _verify_link_file_exists(brickdir, file_name):
        """ Verifies whether a file link is present in given subvol
        Args:
               brickdir(Class Object): BrickDir object containing data about
                                       bricks under a specific subvol
        Returns:
                True/False(bool): Based on existance of file link
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object
        file_path = brickdir._fqpath + file_name
        file_stat = get_file_stat(brickdir._host, file_path)
        if file_stat is None:
            g.log.error("Failed to get File stat for %s", file_path)
            return False
        if not file_stat['access'] == "1000":
            g.log.error("Access value not 1000 for %s", file_path)
            return False

        # Check for file type to be'sticky empty', have size of 0 and
        # have the glusterfs.dht.linkto xattr set.
        ret = is_linkto_file(brickdir._host, file_path)
        if not ret:
            g.log.error("%s is not a linkto file", file_path)
            return False
        return True
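
# A minimal sketch of how the linkto-xattr part of the check above could be
# done by hand, assuming the getfattr CLI is available on the brick host;
# _has_linkto_xattr is a hypothetical helper, not part of glustolibs.
def _has_linkto_xattr(host, file_path):
    """Return True if trusted.glusterfs.dht.linkto is set on file_path."""
    cmd = ("getfattr -n trusted.glusterfs.dht.linkto --absolute-names "
           "{}".format(file_path))
    ret, _, _ = g.run(host, cmd)
    # getfattr exits non-zero when the requested xattr is absent
    return ret == 0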
    def test_access_file_with_stale_linkto_xattr(self):
        """
        Description: Checks if the files are accessible as non-root user if
                     the files have stale linkto xattr.
        Steps:
        1) Create a volume and start it.
        2) Mount the volume on client node using FUSE.
        3) Create a file.
        4) Enable performance.parallel-readdir and
           performance.readdir-ahead on the volume.
        5) Rename the file in order to create
           a linkto file.
        6) Force the linkto xattr values to become stale by changing the dht
           subvols in the graph.
        7) Log in as a non-root user and access the file.
        """
        # pylint: disable=protected-access

        # Set permissions on the mount-point
        m_point = self.mounts[0].mountpoint
        ret = set_file_permissions(self.clients[0], m_point, "-R 777")
        self.assertTrue(ret, "Failed to set file permissions")
        g.log.info("Successfully set file permissions on mount-point")

        # Creating a file on the mount-point
        cmd = 'dd if=/dev/urandom of={}/FILE-1 count=1 bs=16k'.format(m_point)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to create file")

        # Enable performance.parallel-readdir and
        # performance.readdir-ahead on the volume
        options = {
            "performance.parallel-readdir": "enable",
            "performance.readdir-ahead": "enable"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to set volume options")
        g.log.info("Successfully set volume options")

        # Finding a file name such that renaming source file to it will form a
        # linkto file
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        newhash = find_new_hashed(subvols, "/", "FILE-1")
        new_name = str(newhash.newname)
        new_host = str(newhash.hashedbrickobject._host)
        new_name_path = str(newhash.hashedbrickobject._fqpath)[:-1]
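        # (find_new_hashed, from the glustolibs dht utilities, returns an
        # object whose 'newname' hashes to a different subvol than the
        # source name and whose 'hashedbrickobject' is a brick of that
        # target subvol)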

        # Move file such that it hashes to some other subvol and forms linkto
        # file
        ret = move_file(self.clients[0], "{}/FILE-1".format(m_point),
                        "{}/{}".format(m_point, new_name))
        self.assertTrue(ret, "Rename failed")
        g.log.info('Renamed file %s to %s', "{}/FILE-1".format(m_point),
                   "{}/{}".format(m_point, new_name))

        # Check if "dst_file" is linkto file
        ret = is_linkto_file(new_host, '{}{}'.format(new_name_path, new_name))
        self.assertTrue(ret, "File is not a linkto file")
        g.log.info("File is linkto file")

        # Force the linkto xattr values to become stale by changing the dht
        # subvols in the graph; for that:
        # disable performance.parallel-readdir and
        # performance.readdir-ahead on the volume
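        # (Roughly why this works: enabling parallel-readdir inserts a
        # readdir-ahead xlator above each dht subvol, changing the subvol
        # names in the client graph; disabling it reverts them, so the
        # subvol name recorded in the linkto xattr no longer matches any
        # current subvol and the xattr goes stale.)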
        options = {
            "performance.parallel-readdir": "disable",
            "performance.readdir-ahead": "disable"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to disable volume options")
        g.log.info("Successfully disabled volume options")

        # Access the file as non-root user
        cmd = "ls -lR {}".format(m_point)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd, user="******")
        self.assertEqual(ret, 0, "Lookup failed ")
        g.log.info("Lookup successful")

    def test_delete_file_in_migration(self):
        """
        Verify that if a file is picked for migration and then deleted, the
        file should be removed successfully.
        * First create a big data file of 10 GB.
        * Rename that file, such that after the rename a linkto file is
          created (this ensures the file is picked for migration).
        * Add bricks to the volume and trigger rebalance using force option.
        * When the file has been picked for migration, delete that file from
          the mount point.
        * Check whether the file has been deleted or not on the mount-point
          as well as the back-end bricks.
        """

        # pylint: disable=too-many-statements
        # pylint: disable=too-many-locals
        # pylint: disable=protected-access

        mountpoint = self.mounts[0].mountpoint

        # Location of source file
        src_file = mountpoint + '/file1'

        # Finding a file name such that renaming source file to it will form a
        # linkto file
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        newhash = find_new_hashed(subvols, "/", "file1")
        new_name = str(newhash.newname)
        new_host = str(newhash.hashedbrickobject._host)
        new_name_path = str(newhash.hashedbrickobject._fqpath)[:-2]

        # Location of destination file to which source file will be renamed
        dst_file = '{}/{}'.format(mountpoint, new_name)

        # Create a 10 GB source file
        cmd = (
            "dd if=/dev/urandom of={} bs=1024K count=10000".format(src_file))
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("File {} creation failed".format(src_file)))

        # Move file such that it hashes to some other subvol and forms linkto
        # file
        ret = move_file(self.clients[0], src_file, dst_file)
        self.assertTrue(ret, "Rename failed")
        g.log.info('Renamed file %s to %s', src_file, dst_file)

        # Check if "file_two" is linkto file
        ret = is_linkto_file(new_host, '{}/{}'.format(new_name_path, new_name))
        self.assertTrue(ret, "File is not a linkto file")
        g.log.info("File is linkto file")

        # Expanding volume by adding bricks to the volume
        ret, _, _ = add_brick(self.mnode,
                              self.volname,
                              self.add_brick_list,
                              force=True)
        self.assertEqual(ret, 0,
                         ("Volume {}: Add-brick failed".format(self.volname)))
        g.log.info("Volume %s: add-brick successful", self.volname)

        # Log Volume Info and Status after expanding the volume
        log_volume_info_and_status(self.mnode, self.volname)

        # Start Rebalance
        ret, _, _ = rebalance_start(self.mnode, self.volname, force=True)
        self.assertEqual(
            ret, 0,
            ("Volume {}: Failed to start rebalance".format(self.volname)))
        g.log.info("Volume %s : Rebalance started ", self.volname)
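        # (Note: 'force' makes rebalance migrate files even when the
        # destination subvol has less free space than the source, which
        # helps ensure the renamed file actually gets picked up for
        # migration rather than being skipped.)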

        # Check if rebalance is running and delete the file
        status_info = get_rebalance_status(self.mnode, self.volname)
        status = status_info['aggregate']['statusStr']
        self.assertEqual(status, 'in progress', "Rebalance is not running")
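
        # A minimal polling sketch (an assumption, not part of the original
        # test): wait until rebalance has actually migrated at least one
        # file before deleting, so that the delete overlaps an in-flight
        # migration; the 30-second cap is arbitrary.
        import time
        for _ in range(30):
            if int(status_info['aggregate']['files']) > 0:
                break
            time.sleep(1)
            status_info = get_rebalance_status(self.mnode, self.volname)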
        ret, _, _ = g.run(self.clients[0], ("rm -rf {}".format(dst_file)))
        self.assertEqual(ret, 0, ("Cannot delete file {}".format(dst_file)))
        g.log.info("File is deleted")

        # Check if the file is present on the mount point
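        # (GNU ls exits with status 2 when its operand does not exist,
        # hence the assertion against 2 instead of a plain non-zero check)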
        ret, _, _ = g.run(self.clients[0], ("ls -l {}".format(dst_file)))
        self.assertEqual(ret, 2, ("Failed to delete file {}".format(dst_file)))

        # Check if the file is present on the backend bricks
        bricks = get_all_bricks(self.mnode, self.volname)
        for brick in bricks:
            node, brick_path = brick.split(':')
            ret, _, _ = g.run(node, "ls -l {}/{}".format(brick_path, new_name))
            self.assertEqual(
                ret, 2, "File is still present on"
                " back-end brick: {}".format(brick_path))
            g.log.info("File is deleted from back-end brick: %s", brick_path)

        # Check if rebalance process is still running
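        # (pgrep exits with 1 when no process matches, so a return code of
        # 1 here means the rebalance daemon has exited)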
        for server in self.servers:
            ret, _, _ = g.run(server, "pgrep rebalance")
            self.assertEqual(ret, 1, ("Rebalance process is still"
                                      " running on server {}".format(server)))
            g.log.info("Rebalance process is not running")