Пример #1
0
    def open(self):
        """
        Open the object.

        This implementation opens the data file representing the object, reads
        the associated metadata in the extended attributes, additionally
        combining metadata from fast-POST `.meta` files.

        .. note::

            An implementation is allowed to raise any of the following
            exceptions, but is only required to raise `DiskFileNotExist` when
            the object representation does not exist.

        :raises DiskFileNotExist: if the object does not exist
        :raises DiskFileExpired: if the object has expired
        :returns: itself for use as a context manager
        """
        # Read-only open of the on-disk data file backing this object.
        try:
            fd = do_open(self._data_file, os.O_RDONLY | O_CLOEXEC)
        except SwiftOnFileSystemOSError as err:
            if err.errno in (errno.ENOENT, errno.ENOTDIR):
                # If the file does not exist, or some part of the path does
                # not exist, raise the expected DiskFileNotExist
                raise DiskFileNotExist
            raise
        else:
            stats = do_fstat(fd)
            if not stats:
                # fstat failed: close the descriptor before bailing out so
                # it is not leaked.
                do_close(fd)
                return
            self._is_dir = stat.S_ISDIR(stats.st_mode)
            obj_size = stats.st_size

        self._metadata = read_metadata(fd)
        if not validate_object(self._metadata):
            # Stored metadata is missing or invalid; recreate it and
            # re-read so self._metadata reflects what is now on disk.
            create_object_metadata(fd)
            self._metadata = read_metadata(fd)
        assert self._metadata is not None
        self._filter_metadata()

        if self._is_dir:
            # Directories carry no readable payload; release the fd now.
            do_close(fd)
            obj_size = 0
            self._fd = -1
        else:
            if self._is_object_expired(self._metadata):
                raise DiskFileExpired(metadata=self._metadata)
            self._fd = fd

        self._obj_size = obj_size
        return self
Пример #2
0
    def open(self):
        """
        Open the object.

        This implementation opens the data file representing the object, reads
        the associated metadata in the extended attributes, additionally
        combining metadata from fast-POST `.meta` files.

        .. note::

            An implementation is allowed to raise any of the following
            exceptions, but is only required to raise `DiskFileNotExist` when
            the object representation does not exist.

        :raises DiskFileNotExist: if the object does not exist
        :raises DiskFileExpired: if the object has expired
        :returns: itself for use as a context manager
        """
        # Read-only open of the on-disk data file backing this object.
        try:
            fd = do_open(self._data_file, os.O_RDONLY | O_CLOEXEC)
        except GlusterFileSystemOSError as err:
            if err.errno in (errno.ENOENT, errno.ENOTDIR):
                # If the file does not exist, or some part of the path does
                # not exist, raise the expected DiskFileNotExist
                raise DiskFileNotExist
            raise
        else:
            stats = do_fstat(fd)
            if not stats:
                # fstat failed: close the descriptor before bailing out so
                # it is not leaked.
                do_close(fd)
                return
            self._is_dir = stat.S_ISDIR(stats.st_mode)
            obj_size = stats.st_size

        self._metadata = read_metadata(fd)
        if not validate_object(self._metadata):
            # Stored metadata is missing or invalid; recreate it and
            # re-read so self._metadata reflects what is now on disk.
            create_object_metadata(fd)
            self._metadata = read_metadata(fd)
        assert self._metadata is not None
        self._filter_metadata()

        if self._is_dir:
            # Directories carry no readable payload; release the fd now.
            do_close(fd)
            obj_size = 0
            self._fd = -1
        else:
            if self._is_object_expired(self._metadata):
                raise DiskFileExpired(metadata=self._metadata)
            self._fd = fd

        self._obj_size = obj_size
        return self
Пример #3
0
 def test_read_metadata_err(self):
     # Simulate an xattr get failure and verify it surfaces as IOError
     # after exactly one get attempt.
     obj_path = "/tmp/foo/r"
     stored = {'a': 'y'}
     key = _xkey(obj_path, utils.METADATA_KEY)
     _xattrs[key] = pickle.dumps(stored, utils.PICKLE_PROTOCOL)
     _xattr_get_err[key] = errno.EOPNOTSUPP
     try:
         utils.read_metadata(obj_path)
     except IOError as err:
         assert err.errno == errno.EOPNOTSUPP
         assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
     else:
         self.fail("Expected an IOError exception on get")
Пример #4
0
    def _keep_sys_metadata(self, metadata):
        """
        Make sure system metadata is not lost when writing new user metadata

        Reads the existing metadata and, for every system key present there,
        copies it into the metadata dict the caller is about to write;
        object/type defaults are filled in when the originals carry none.
        """
        # Prefer previously fetched metadata: stale size/etag was already
        # corrected when it was first read.
        orig_metadata = self._metadata or read_metadata(self._data_file)

        for sys_key in (X_CONTENT_TYPE, X_ETAG, 'name', X_CONTENT_LENGTH,
                        X_OBJECT_TYPE, X_TYPE):
            if sys_key in orig_metadata:
                metadata[sys_key] = orig_metadata[sys_key]

        if X_OBJECT_TYPE not in orig_metadata:
            # Classify by content type when no explicit object type exists.
            is_dir = metadata[X_CONTENT_TYPE].lower() == DIR_TYPE
            metadata[X_OBJECT_TYPE] = DIR_OBJECT if is_dir else FILE

        if X_TYPE not in orig_metadata:
            metadata[X_TYPE] = OBJECT

        return metadata
Пример #5
0
    def _keep_sys_metadata(self, metadata):
        """
        Make sure system metadata is not lost when writing new user metadata

        The existing on-disk metadata is consulted and any system keys found
        there are carried over into the metadata dict being written, with
        object/type defaults supplied when the originals have none.
        """
        # Previously fetched metadata is authoritative: stale size/etag was
        # refreshed the first time it was read.
        orig_metadata = self._metadata or read_metadata(self._data_file)

        preserved = (X_CONTENT_TYPE, X_ETAG, 'name', X_CONTENT_LENGTH,
                     X_OBJECT_TYPE, X_TYPE)
        metadata.update((k, orig_metadata[k])
                        for k in preserved if k in orig_metadata)

        if X_OBJECT_TYPE not in orig_metadata:
            if metadata[X_CONTENT_TYPE].lower() == DIR_TYPE:
                metadata[X_OBJECT_TYPE] = DIR_OBJECT
            else:
                metadata[X_OBJECT_TYPE] = FILE

        if X_TYPE not in orig_metadata:
            metadata[X_TYPE] = OBJECT

        return metadata
Пример #6
0
    def delete(self, timestamp):
        """
        Delete the object.

        This implementation creates a tombstone file using the given
        timestamp, and removes any older versions of the object file. Any
        file that has an older timestamp than timestamp will be deleted.

        .. note::

            An implementation is free to use or ignore the timestamp
            parameter.

        :param timestamp: timestamp to compare with each file
        :raises DiskFileError: this implementation will raise the same
                            errors as the `create()` method.
        """
        try:
            on_disk_meta = read_metadata(self._data_file)
        except (IOError, OSError) as e:
            # A missing data file is tolerated -- there is nothing to
            # compare against; any other error is propagated.
            if e.errno != errno.ENOENT:
                raise
        else:
            # An equal or newer object on disk must not be deleted.
            if on_disk_meta[X_TIMESTAMP] >= timestamp:
                return

        # Perform the actual unlink work off the request thread.
        self._threadpool.run_in_thread(self._unlinkold)

        self._metadata = None
        self._data_file = None
Пример #7
0
    def _unlinkold(self):
        if self._is_dir:
            # Marker/object directory: remove it from the filesystem only
            # when it holds no objects.  If it does, strip the object
            # metadata tag instead, turning it into a fake-filesystem-only
            # directory that is deleted when its container or parent
            # directory is deleted.
            #
            # FIXME: Ideally we should use an atomic metadata update operation
            meta = read_metadata(self._data_file)
            if dir_is_object(meta):
                meta[X_OBJECT_TYPE] = DIR_NON_OBJECT
                write_metadata(self._data_file, meta)
            rmobjdir(self._data_file)
        else:
            # Plain file object: unlink it directly.
            do_unlink(self._data_file)

        # Garbage collection of non-object directories: now that the file
        # is gone, walk up removing any empty, non-object ancestors until
        # the container root (or an occupied directory) stops us.
        parent = os.path.dirname(self._data_file)
        while parent and parent != self._container_path:
            if not rmobjdir(parent):
                # A directory with objects was found -- stop collecting.
                break
            parent = os.path.dirname(parent)
Пример #8
0
    def delete(self, timestamp):
        """
        Delete the object.

        This implementation creates a tombstone file using the given
        timestamp, and removes any older versions of the object file. Any
        file that has an older timestamp than timestamp will be deleted.

        .. note::

            An implementation is free to use or ignore the timestamp
            parameter.

        :param timestamp: timestamp to compare with each file
        :raises DiskFileError: this implementation will raise the same
                            errors as the `create()` method.
        """
        try:
            existing_meta = read_metadata(self._data_file)
        except (IOError, OSError) as err:
            # Only a vanished data file is acceptable here.
            if err.errno != errno.ENOENT:
                raise
        else:
            if existing_meta[X_TIMESTAMP] >= timestamp:
                # The on-disk object is as new or newer; leave it alone.
                return

        # Run the unlink in the threadpool so the caller is not blocked.
        self._threadpool.run_in_thread(self._unlinkold)

        self._metadata = None
        self._data_file = None
Пример #9
0
    def _unlinkold(self):
        if self._is_dir:
            # This is a marker (or object) directory.  It may only be
            # removed from the filesystem when empty of objects; otherwise
            # just clear the object metadata tag, demoting it to a
            # fake-filesystem-only directory that disappears when the
            # container or parent directory is deleted.
            #
            # FIXME: Ideally we should use an atomic metadata update operation
            metadata = read_metadata(self._data_file)
            if dir_is_object(metadata):
                metadata[X_OBJECT_TYPE] = DIR_NON_OBJECT
                write_metadata(self._data_file, metadata)
            rmobjdir(self._data_file)
        else:
            # A regular file object is simply unlinked.
            do_unlink(self._data_file)

        # Garbage-collect non-object ancestor directories now that the
        # entry is gone, walking up towards the container root.
        dirname = os.path.dirname(self._data_file)
        while dirname and dirname != self._container_path:
            if rmobjdir(dirname):
                dirname = os.path.dirname(dirname)
            else:
                # Found a directory that is an object or still has objects;
                # garbage collection stops here.
                break
Пример #10
0
 def test_read_metadata(self):
     # Round-trip: pickled metadata stored in an xattr is read back intact
     # with a single get.
     obj_path = "/tmp/foo/r"
     stored = {'a': 'y'}
     _xattrs[_xkey(obj_path, utils.METADATA_KEY)] = pickle.dumps(
         stored, utils.PICKLE_PROTOCOL)
     result = utils.read_metadata(obj_path)
     assert result == stored, "Expected %r, result %r" % (stored, result)
     assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
Пример #11
0
 def test_read_metadata(self):
     # Metadata serialized into a single xattr must round-trip intact.
     obj_path = "/tmp/foo/r"
     stored = {'a': 'y'}
     _xattrs[_xkey(obj_path, utils.METADATA_KEY)] = serialize_metadata(stored)
     result = utils.read_metadata(obj_path)
     assert result == stored, "Expected %r, result %r" % (stored, result)
     assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
Пример #12
0
 def test_read_metadata(self):
     # Read back exactly what serialize_metadata() stored in the xattr.
     path = "/tmp/foo/r"
     expected = {'a': 'y'}
     key = _xkey(path, utils.METADATA_KEY)
     _xattrs[key] = serialize_metadata(expected)
     actual = utils.read_metadata(path)
     assert actual == expected, "Expected %r, result %r" % (expected, actual)
     assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
Пример #13
0
 def test_read_metadata_multiple(self):
     # Metadata too large for one xattr is split across numbered keys and
     # must be reassembled on read (three gets expected).
     obj_path = "/tmp/foo/r"
     stored = {'a': 'y' * 150000}
     remaining = pickle.dumps(stored, utils.PICKLE_PROTOCOL)
     for idx in range(0, 3):
         key = _xkey(obj_path, "%s%s" % (utils.METADATA_KEY, idx or ''))
         _xattrs[key] = remaining[:utils.MAX_XATTR_SIZE]
         remaining = remaining[utils.MAX_XATTR_SIZE:]
     assert not remaining
     result = utils.read_metadata(obj_path)
     assert result == stored, "Expected %r, result %r" % (stored, result)
     assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
Пример #14
0
 def test_read_metadata_multiple_one_missing(self):
     # Only two of the expected chunks are stored; the partial metadata
     # must be discarded and an empty dict returned.
     obj_path = "/tmp/foo/r"
     stored = {'a': 'y' * 150000}
     remaining = serialize_metadata(stored)
     for idx in range(0, 2):
         key = _xkey(obj_path, "%s%s" % (utils.METADATA_KEY, idx or ''))
         _xattrs[key] = remaining[:utils.MAX_XATTR_SIZE]
         remaining = remaining[utils.MAX_XATTR_SIZE:]
     assert len(remaining) <= utils.MAX_XATTR_SIZE
     assert utils.read_metadata(obj_path) == {}
     assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
Пример #15
0
 def test_read_metadata_multiple_one_missing(self):
     # With the final chunk absent, read_metadata() must return {} rather
     # than partially deserialized metadata.
     path = "/tmp/foo/r"
     big_meta = {'a': 'y' * 150000}
     blob = serialize_metadata(big_meta)
     for chunk_no in range(0, 2):
         chunk_key = _xkey(path,
                           "%s%s" % (utils.METADATA_KEY, chunk_no or ''))
         _xattrs[chunk_key] = blob[:utils.MAX_XATTR_SIZE]
         blob = blob[utils.MAX_XATTR_SIZE:]
     assert len(blob) <= utils.MAX_XATTR_SIZE
     res_d = utils.read_metadata(path)
     assert res_d == {}
     assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
Пример #16
0
 def test_read_metadata_multiple_one_missing(self):
     # Two of three pickle chunks present: the read yields {} and the
     # stale chunks are removed from the xattr store.
     path = "/tmp/foo/r"
     big_meta = {'a': 'y' * 150000}
     pickled = pickle.dumps(big_meta, utils.PICKLE_PROTOCOL)
     for chunk_no in range(0, 2):
         key = _xkey(path, "%s%s" % (utils.METADATA_KEY, chunk_no or ''))
         _xattrs[key] = pickled[:utils.MAX_XATTR_SIZE]
         pickled = pickled[utils.MAX_XATTR_SIZE:]
     assert len(pickled) <= utils.MAX_XATTR_SIZE
     assert utils.read_metadata(path) == {}
     assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
     assert len(_xattrs.keys()) == 0, "Expected 0 keys, found %d" % len(_xattrs.keys())
Пример #17
0
    def read_metadata(self):
        """
        Return the metadata for an object without requiring the caller to open
        the object first.

        This method is invoked by Swift code in the POST, PUT, HEAD and DELETE
        paths as ``metadata = disk_file.read_metadata()``.

        The operations performed here are very similar to those made in
        open(). This is to avoid opening and closing of file (two syscalls
        over network). IOW, this optimization addresses the case where the fd
        returned by open() isn't going to be used i.e the file is not read
        (GET or metadata recalculation)

        :returns: metadata dictionary for an object
        :raises DiskFileNotExist: if the object does not exist
        :raises DiskFileExpired: if the object has expired
        :raises DiskFileError: this implementation will raise the same
                            errors as the `open()` method.
        """
        try:
            self._metadata = read_metadata(self._data_file)
        except (OSError, IOError) as err:
            if err.errno in (errno.ENOENT, errno.ESTALE):
                raise DiskFileNotExist
            # Bare raise preserves the original traceback; ``raise err``
            # would discard it on Python 2.
            raise

        if self._metadata and self._is_object_expired(self._metadata):
            raise DiskFileExpired(metadata=self._metadata)

        try:
            self._stat = do_stat(self._data_file)
            self._is_dir = stat.S_ISDIR(self._stat.st_mode)
        except (OSError, IOError) as err:
            if err.errno in (errno.ENOENT, errno.ESTALE):
                raise DiskFileNotExist
            raise

        if not validate_object(self._metadata, self._stat):
            # Metadata is stale/invalid. So open the object for reading
            # to update Etag and other metadata.
            with self.open():
                return self.get_metadata()
        else:
            # Metadata is valid. Don't have to open the file.
            self._filter_metadata()
            return self._metadata
Пример #18
0
    def read_metadata(self):
        """
        Return the metadata for an object without requiring the caller to open
        the object first.

        This method is invoked by Swift code in the POST, PUT, HEAD and DELETE
        paths as ``metadata = disk_file.read_metadata()``.

        The operations performed here are very similar to those made in
        open(). This is to avoid opening and closing of file (two syscalls
        over network). IOW, this optimization addresses the case where the fd
        returned by open() isn't going to be used i.e the file is not read
        (GET or metadata recalculation)

        :returns: metadata dictionary for an object
        :raises DiskFileNotExist: if the object does not exist
        :raises DiskFileExpired: if the object has expired
        :raises DiskFileError: this implementation will raise the same
                            errors as the `open()` method.
        """
        try:
            self._metadata = read_metadata(self._data_file)
        except (OSError, IOError) as err:
            if err.errno in (errno.ENOENT, errno.ESTALE):
                raise DiskFileNotExist
            # Bare raise preserves the original traceback; ``raise err``
            # would discard it on Python 2.
            raise

        if self._metadata and self._is_object_expired(self._metadata):
            raise DiskFileExpired(metadata=self._metadata)

        try:
            self._stat = do_stat(self._data_file)
            self._is_dir = stat.S_ISDIR(self._stat.st_mode)
        except (OSError, IOError) as err:
            if err.errno in (errno.ENOENT, errno.ESTALE):
                raise DiskFileNotExist
            raise

        if not validate_object(self._metadata, self._stat):
            # Metadata is stale/invalid. So open the object for reading
            # to update Etag and other metadata.
            with self.open():
                return self.get_metadata()
        else:
            # Metadata is valid. Don't have to open the file.
            self._filter_metadata()
            return self._metadata
Пример #19
0
 def _clear_dir_object(self, obj):
     # Demote the directory at ``obj`` to a plain (non-object) directory
     # by rewriting its object-type metadata in place.
     obj_path = os.path.join(self.rootdir, obj)
     metadata = utils.read_metadata(obj_path)
     metadata[utils.X_OBJECT_TYPE] = utils.DIR_NON_OBJECT
     utils.write_metadata(obj_path, metadata)
Пример #20
0
 def _set_dir_object(self, obj):
     # Promote the directory at ``obj`` to a directory object by rewriting
     # its object-type metadata in place.
     # Fix: the original wrote the updated metadata to self.dirs[0]
     # regardless of ``obj`` (a copy-paste slip; sibling helper
     # _clear_dir_object writes back to the path it read from).  Write to
     # the same path the metadata was read from.
     obj_path = os.path.join(self.rootdir, obj)
     metadata = utils.read_metadata(obj_path)
     metadata[utils.X_OBJECT_TYPE] = utils.DIR_OBJECT
     utils.write_metadata(obj_path, metadata)
Пример #21
0
    def open(self):
        """
        Open the object.

        This implementation opens the data file representing the object, reads
        the associated metadata in the extended attributes, additionally
        combining metadata from fast-POST `.meta` files.

        .. note::

            An implementation is allowed to raise any of the following
            exceptions, but is only required to raise `DiskFileNotExist` when
            the object representation does not exist.

        :raises DiskFileNotExist: if the object does not exist
        :raises DiskFileExpired: if the object has expired
        :returns: itself for use as a context manager
        """
        # Read-only open of the on-disk data file backing this object.
        try:
            self._fd = do_open(self._data_file, os.O_RDONLY | O_CLOEXEC)
        except SwiftOnFileSystemOSError as err:
            if err.errno in (errno.ENOENT, errno.ENOTDIR):
                # If the file does not exist, or some part of the path does
                # not exist, raise the expected DiskFileNotExist
                raise DiskFileNotExist
            raise
        try:
            self._stat = do_fstat(self._fd)
            self._is_dir = stat.S_ISDIR(self._stat.st_mode)
            obj_size = self._stat.st_size

            self._metadata = read_metadata(self._fd)
            if not validate_object(self._metadata, self._stat):
                self._metadata = create_object_metadata(self._fd, self._stat,
                                                        self._metadata)
            assert self._metadata is not None
            self._filter_metadata()

            if self._is_dir:
                do_close(self._fd)
                obj_size = 0
                self._fd = -1
            else:
                if self._is_object_expired(self._metadata):
                    raise DiskFileExpired(metadata=self._metadata)
            self._obj_size = obj_size
        except (OSError, IOError, DiskFileExpired) as err:
            # Something went wrong. Context manager will not call
            # __exit__. So we close the fd manually here.
            self._close_fd()
            if hasattr(err, 'errno') and err.errno == errno.ENOENT:
                # Handle races: ENOENT can be raised by read_metadata()
                # call in GlusterFS if file gets deleted by another
                # client after do_open() succeeds
                # (logging.warn is a deprecated alias of warning)
                logging.warning("open(%s) succeeded but one of the subsequent "
                                "syscalls failed with ENOENT. Raising "
                                "DiskFileNotExist." % (self._data_file))
                raise DiskFileNotExist
            else:
                # Re-raise the original exception after fd has been closed
                raise

        return self
Пример #22
0
    def open(self):
        """
        Open the object.

        This implementation opens the data file representing the object, reads
        the associated metadata in the extended attributes, additionally
        combining metadata from fast-POST `.meta` files.

        .. note::

            An implementation is allowed to raise any of the following
            exceptions, but is only required to raise `DiskFileNotExist` when
            the object representation does not exist.

        :raises DiskFileNotExist: if the object does not exist
        :raises DiskFileExpired: if the object has expired
        :returns: itself for use as a context manager
        """
        # Read-only open of the on-disk data file backing this object.
        try:
            self._fd = do_open(self._data_file, os.O_RDONLY | O_CLOEXEC)
        except SwiftOnFileSystemOSError as err:
            if err.errno in (errno.ENOENT, errno.ENOTDIR):
                # If the file does not exist, or some part of the path does
                # not exist, raise the expected DiskFileNotExist
                raise DiskFileNotExist
            raise
        try:
            self._stat = do_fstat(self._fd)
            self._is_dir = stat.S_ISDIR(self._stat.st_mode)
            obj_size = self._stat.st_size

            self._metadata = read_metadata(self._fd)
            if not validate_object(self._metadata, self._stat):
                create_object_metadata(self._fd)
                self._metadata = read_metadata(self._fd)
            assert self._metadata is not None
            self._filter_metadata()

            if self._is_dir:
                do_close(self._fd)
                obj_size = 0
                self._fd = -1
            else:
                if self._is_object_expired(self._metadata):
                    raise DiskFileExpired(metadata=self._metadata)
            self._obj_size = obj_size
        except (OSError, IOError, DiskFileExpired) as err:
            # Something went wrong. Context manager will not call
            # __exit__. So we close the fd manually here.
            self._close_fd()
            if hasattr(err, 'errno') and err.errno == errno.ENOENT:
                # Handle races: ENOENT can be raised by read_metadata()
                # call in GlusterFS if file gets deleted by another
                # client after do_open() succeeds
                # (logging.warn is a deprecated alias of warning)
                logging.warning("open(%s) succeeded but one of the subsequent "
                                "syscalls failed with ENOENT. Raising "
                                "DiskFileNotExist." % (self._data_file))
                raise DiskFileNotExist
            else:
                # Re-raise the original exception after fd has been closed
                raise

        return self
Пример #23
0
 def test_read_metadata_notfound(self):
     # With no xattrs stored, read_metadata() returns an empty dict after
     # exactly one get attempt.
     result = utils.read_metadata("/tmp/foo/r")
     assert result == {}
     assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt