Example #1
    def put(self, metadata, extension='.data'):
        """
        Finalize writing the file to disk by renaming it from the temp
        file to its real location.  This should be called after the data
        has been written to the temp file.

        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        assert self.tmppath is not None
        timestamp = normalize_timestamp(metadata['X-Timestamp'])
        metadata['name'] = self.disk_file.name
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self.fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount of
        # redundant work the drop cache code will perform on the pages (now
        # that after fsync the pages will be all clean).
        tpool.execute(fsync, self.fd)
        # From the Department of the Redundancy Department, make sure we
        # call drop_cache() after fsync() to avoid redundant work (pages
        # all clean).
        drop_buffer_cache(self.fd, 0, self.upload_size)
        invalidate_hash(os.path.dirname(self.disk_file.datadir))
        # After the rename completes, this object will be available for other
        # requests to reference.
        renamer(self.tmppath,
                os.path.join(self.disk_file.datadir, timestamp + extension))
        self.disk_file.metadata = metadata
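The durability ordering above (metadata write, fsync(), drop_buffer_cache(), rename()) can be reduced to a minimal standalone sketch. Swift really does pickle object metadata into a user.swift.metadata xattr on the data file, but finalize_object below and its error handling are assumptions for illustration, not Swift's actual helpers (Linux-only, Python 3.3+):

    import os
    import pickle

    def write_metadata(fd, metadata):
        # Metadata lives in an xattr on the data file itself, so the
        # single fsync() below covers both data and metadata.
        os.setxattr(fd, 'user.swift.metadata', pickle.dumps(metadata))

    def finalize_object(fd, tmppath, target_path, metadata):
        write_metadata(fd, metadata)
        # Flush everything before the rename makes the file visible.
        os.fsync(fd)
        # rename() is atomic on POSIX: readers see the old state or the
        # complete new file, never a partial write.
        os.rename(tmppath, target_path)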
Example #2
    def write(self, chunk):
        """
        Write a chunk of data to disk. All invocations of this method must
        come before invoking the :func:`put` method.

        For this implementation, the data is written into a temporary file.

        :param chunk: the chunk of data to write as a string object

        :returns: the total number of bytes written to an object
        """

        def _write_entire_chunk(chunk):
            while chunk:
                written = os.write(self._fd, chunk)
                self._upload_size += written
                chunk = chunk[written:]

        self._threadpool.run_in_thread(_write_entire_chunk, chunk)

        # For large files sync every 512MB (by default) written
        diff = self._upload_size - self._last_sync
        if diff >= self._bytes_per_sync:
            self._threadpool.force_run_in_thread(fdatasync, self._fd)
            drop_buffer_cache(self._fd, self._last_sync, diff)
            self._last_sync = self._upload_size

        return self._upload_size
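The periodic-sync logic shared by every write() variant in these examples can be sketched without Swift's threadpool or helpers; on Linux with Python 3.3+, os.posix_fadvise does what drop_buffer_cache() does here. BYTES_PER_SYNC and write_stream are names invented for the sketch:

    import os

    BYTES_PER_SYNC = 512 * 1024 * 1024  # Swift's default bytes_per_sync

    def write_stream(fd, chunks):
        upload_size = last_sync = 0
        for chunk in chunks:
            # os.write() may write fewer bytes than asked; loop until done.
            while chunk:
                written = os.write(fd, chunk)
                upload_size += written
                chunk = chunk[written:]
            # Bound dirty page-cache growth: flush and evict every 512MB.
            if upload_size - last_sync >= BYTES_PER_SYNC:
                os.fdatasync(fd)
                os.posix_fadvise(fd, last_sync, upload_size - last_sync,
                                 os.POSIX_FADV_DONTNEED)
                last_sync = upload_size
        return upload_size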
Example #3
    def write(self, chunk):
        """
        Write a chunk of data into the temporary file.

        :param chunk: the chunk of data to write as a string object
        """
        while chunk:
            written = os.write(self.fd, chunk)
            self.upload_size += written
            chunk = chunk[written:]
            # For large files sync every 512MB (by default) written
            diff = self.upload_size - self.last_sync
            if diff >= self.disk_file.bytes_per_sync:
                tpool.execute(fdatasync, self.fd)
                drop_buffer_cache(self.fd, self.last_sync, diff)
                self.last_sync = self.upload_size
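Note the design difference from Examples #2 and #7: here os.write() runs inline, so a slow disk stalls the whole eventlet hub, whereas the threadpool variants push the blocking write loop into a worker thread and keep only the cheap bookkeeping on the hub.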
Example #4
    def finalize_put():
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self.fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount
        # of redundant work the drop cache code will perform on the pages
        # (now that after fsync the pages will be all clean).
        fsync(self.fd)
        # From the Department of the Redundancy Department, make sure
        # we call drop_cache() after fsync() to avoid redundant work
        # (pages all clean).
        drop_buffer_cache(self.fd, 0, self.upload_size)
        invalidate_hash(os.path.dirname(self.disk_file.datadir))
        # After the rename completes, this object will be available for
        # other requests to reference.
        renamer(self.tmppath,
                os.path.join(self.disk_file.datadir, timestamp + extension))
Example #5
    def _finalize_put(self, metadata, target_path):
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self._fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount of
        # redundant work the drop cache code will perform on the pages (now
        # that after fsync the pages will be all clean).
        fsync(self._fd)
        # From the Department of the Redundancy Department, make sure we call
        # drop_cache() after fsync() to avoid redundant work (pages all
        # clean).
        drop_buffer_cache(self._fd, 0, self._upload_size)
        invalidate_hash(dirname(self._datadir))
        # After the rename completes, this object will be available for other
        # requests to reference.
        renamer(self._tmppath, target_path)
        hash_cleanup_listdir(self._datadir)
Example #6
    def _finalize_put(self, metadata, target_path):
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self.fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount
        # of redundant work the drop cache code will perform on the pages
        # (now that after fsync the pages will be all clean).
        fsync(self.fd)
        # From the Department of the Redundancy Department, make sure
        # we call drop_cache() after fsync() to avoid redundant work
        # (pages all clean).
        drop_buffer_cache(self.fd, 0, self.upload_size)
        invalidate_hash(dirname(self.disk_file.datadir))
        # After the rename completes, this object will be available for
        # other requests to reference.
        renamer(self.tmppath, target_path)
        hash_cleanup_listdir(self.disk_file.datadir)
Example #7
    def write(self, chunk):
        """
        Write a chunk of data into the temporary file.

        :param chunk: the chunk of data to write as a string object
        """
        def _write_entire_chunk(chunk):
            while chunk:
                written = os.write(self.fd, chunk)
                self.upload_size += written
                chunk = chunk[written:]

        self.threadpool.run_in_thread(_write_entire_chunk, chunk)

        # For large files sync every 512MB (by default) written
        diff = self.upload_size - self.last_sync
        if diff >= self.disk_file.bytes_per_sync:
            self.threadpool.force_run_in_thread(fdatasync, self.fd)
            drop_buffer_cache(self.fd, self.last_sync, diff)
            self.last_sync = self.upload_size
Example #8
    def copy_action(self, src_file, dst_file, req, account, dbpath, tx_id):

        try:
            upload_expiration = time.time() + self.max_upload_time
            upload_size = 0
            last_sync = 0
            with dst_file.mkstemp() as (fd, tmppath):

                for chunk in src_file:

                    upload_size += len(chunk)
                    if time.time() > upload_expiration:
                        task_db_update(dbpath, 'request timeout', tx_id)
                        return jresponse('-1', 'request timeout', req, 408)

                    while chunk:
                        written = os.write(fd, chunk)
                        chunk = chunk[written:]
                    # For large files sync every 512MB (by default) written
                    if upload_size - last_sync >= self.bytes_per_sync:
                        tpool.execute(os.fdatasync, fd)
                        drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                        last_sync = upload_size
                    sleep()

                dst_file.copy_put(fd, tmppath)
            if dst_file.is_deleted():
                task_db_update(dbpath, 'failed', 'conflict', tx_id)
                return jresponse('-1', 'conflict', req, 409)

            dst_file.metadata = src_file.metadata
            dst_file.metadata['X-Timestamp'] = req.headers['x-timestamp']
            with dst_file.mkstemp() as (fd, tmppath):
                dst_file.put(fd, tmppath, dst_file.metadata, extension='.meta')
            self.account_update(req, account,
                                src_file.metadata['Content-Length'],
                                add_flag=True)
            task_db_update(dbpath, 'success', '', tx_id)

        except Exception:
            task_db_update(dbpath, 'failed', 'server exception', tx_id)
            syslog.syslog(syslog.LOG_ERR,
                          'object copy: ' + str(traceback.format_exc()))
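copy_action depends on the destination DiskFile's mkstemp() context manager yielding (fd, tmppath). A minimal sketch of what such a context manager could look like; the cleanup details here are assumptions, not the actual implementation:

    import contextlib
    import os
    import tempfile

    @contextlib.contextmanager
    def mkstemp(tmpdir):
        """Yield (fd, tmppath) for a scratch file, cleaning up on exit."""
        if not os.path.exists(tmpdir):
            os.makedirs(tmpdir)
        fd, tmppath = tempfile.mkstemp(dir=tmpdir)
        try:
            yield fd, tmppath
        finally:
            try:
                os.close(fd)
            except OSError:
                pass  # already closed
            try:
                os.unlink(tmppath)
            except OSError:
                pass  # already renamed into place by put()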
Example #9
        def finalize_put():
            # Write out metadata before fsync() to ensure it is also forced to
            # disk.
            write_metadata(self.fd, metadata)

            # We call fsync() before calling drop_cache() to lower the
            # amount of redundant work the drop cache code will perform on
            # the pages (now that after fsync the pages will be all
            # clean).
            do_fsync(self.fd)
            # From the Department of the Redundancy Department, make sure
            # we call drop_cache() after fsync() to avoid redundant work
            # (pages all clean).
            drop_buffer_cache(self.fd, 0, self.upload_size)

            # At this point we know that the object's full directory path
            # exists, so we can just rename it directly without using Swift's
            # swift.common.utils.renamer(), which makes the directory path and
            # adds extra stat() calls.
            data_file = os.path.join(df.put_datadir, df._obj)
            attempts = 1
            while True:
                try:
                    os.rename(self.tmppath, data_file)
                except OSError as err:
                    if err.errno in (errno.ENOENT, errno.EIO) \
                            and attempts < MAX_RENAME_ATTEMPTS:
                        # FIXME: Why either of these two error conditions is
                        # happening is unknown at this point. This might be a
                        # FUSE issue of some sort or a possible race
                        # condition. So let's sleep on it, and double check
                        # the environment after a good nap.
                        _random_sleep()
                        # Tease out why this error occurred. The man page for
                        # rename reads:
                        #   "The link named by tmppath does not exist; or, a
                        #    directory component in data_file does not exist;
                        #    or, tmppath or data_file is an empty string."
                        assert len(self.tmppath) > 0 and len(data_file) > 0
                        tpstats = do_stat(self.tmppath)
                        tfstats = do_fstat(self.fd)
                        assert tfstats
                        if not tpstats or tfstats.st_ino != tpstats.st_ino:
                            # Temporary file name conflict
                            raise DiskFileError(
                                'DiskFile.put(): temporary file, %s, was'
                                ' already renamed (targeted for %s)' % (
                                    self.tmppath, data_file))
                        else:
                            # Data file target name now has a bad path!
                            dfstats = do_stat(df.put_datadir)
                            if not dfstats:
                                raise DiskFileError(
                                    'DiskFile.put(): path to object, %s, no'
                                    ' longer exists (targeted for %s)' % (
                                        df.put_datadir,
                                        data_file))
                            else:
                                is_dir = stat.S_ISDIR(dfstats.st_mode)
                                if not is_dir:
                                    raise DiskFileError(
                                        'DiskFile.put(): path to object, %s,'
                                        ' no longer a directory (targeted for'
                                        ' %s)' % (df.put_datadir,
                                                  data_file))
                                else:
                                    # Let's retry since everything looks okay
                                    logging.warning(
                                        "DiskFile.put(): os.rename('%s','%s')"
                                        " initially failed (%s) but a"
                                        " stat('%s') following that succeeded:"
                                        " %r" % (
                                            self.tmppath, data_file,
                                            str(err), df.put_datadir,
                                            dfstats))
                                    attempts += 1
                                    continue
                    else:
                        raise GlusterFileSystemOSError(
                            err.errno, "%s, os.rename('%s', '%s')" % (
                                err.strerror, self.tmppath, data_file))
                else:
                    # Success!
                    break
            # Close here so the calling context does not have to perform this
            # in a thread.
            do_close(self.fd)
Example #10
    def _drop_cache(self, fd, offset, length):
        """Drop the buffer cache for the given range of the given fd,
        unless keep_cache is set."""
        if not self.keep_cache:
            drop_buffer_cache(fd, offset, length)
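In Swift, drop_buffer_cache() is a thin ctypes wrapper over posix_fadvise() with POSIX_FADV_DONTNEED; since Python 3.3 the standard library exposes the same call directly, so a sketch of an equivalent helper:

    import logging
    import os

    def drop_buffer_cache(fd, offset, length):
        """Tell the kernel these now-clean pages may leave the page cache."""
        try:
            os.posix_fadvise(fd, offset, length, os.POSIX_FADV_DONTNEED)
        except OSError as err:
            # Purely advisory: failing to drop the cache is not fatal.
            logging.warning('posix_fadvise(%d, %d, %d) failed: %s',
                            fd, offset, length, err)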
Example #11
                    return HTTPInsufficientStorage(drive=device,
                                                   request=request)
            reader = request.environ['wsgi.input'].read
            for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                upload_size += len(chunk)
                if time.time() > upload_expiration:
                    self.logger.increment('PUT.timeouts')
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                while chunk:
                    written = os.write(fd, chunk)
                    chunk = chunk[written:]
                # For large files sync every 512MB (by default) written
                if upload_size - last_sync >= self.bytes_per_sync:
                    tpool.execute(fsync, fd)
                    drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                    last_sync = upload_size
                sleep()

            if 'content-length' in request.headers and \
                    int(request.headers['content-length']) != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(os.fstat(fd).st_size),
Example #12
        with file.mkstemp() as (fd, tmppath):
            if 'content-length' in request.headers:
                fallocate(fd, int(request.headers['content-length']))
            for chunk in iter(lambda: request.body_file.read(
                    self.network_chunk_size), ''):
                upload_size += len(chunk)
                if time.time() > upload_expiration:
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                while chunk:
                    written = os.write(fd, chunk)
                    chunk = chunk[written:]
                # For large files sync every 512MB (by default) written
                if upload_size - last_sync >= self.bytes_per_sync:
                    tpool.execute(os.fdatasync, fd)
                    drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                    last_sync = upload_size

            if 'content-length' in request.headers and \
                    int(request.headers['content-length']) != upload_size:
                return Response(status='499 Client Disconnect')
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(os.fstat(fd).st_size),
            }
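Both object-server fragments (Examples #11 and #12) close with the same pair of integrity checks. Isolated into a hypothetical helper with invented names:

    def check_upload(headers, upload_size, etag_hex):
        """Return an error status for a short or corrupt upload, else None."""
        if ('content-length' in headers
                and int(headers['content-length']) != upload_size):
            return 499  # client disconnected before sending the full body
        if 'etag' in headers and headers['etag'].lower() != etag_hex:
            return 422  # body does not match the client-supplied MD5
        return None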