def link(self, id_, new_id_p, new_name, ctx):
    """Create a hard link named *new_name* for inode *id_* under *new_id_p*.

    Returns the entry attributes of the linked inode.
    """
    log.debug('started with %d, %d, %r', id_, new_id_p, new_name)

    # Refuse to touch the s3ql control file from either side of the link.
    if new_name == CTRL_NAME or id_ == CTRL_INODE:
        log.warning('Attempted to create s3ql control file at %s',
                    get_path(new_id_p, self.db, new_name))
        raise llfuse.FUSEError(errno.EACCES)

    ts_ns = time_ns()
    parent = self.inodes[new_id_p]

    # A parent that has already been unlinked must not gain new entries.
    if parent.refcount == 0:
        log.warning('Attempted to create entry %s with unlinked parent %d',
                    new_name, new_id_p)
        raise FUSEError(errno.EINVAL)

    if self.failsafe or parent.locked:
        raise FUSEError(errno.EPERM)

    parent.ctime_ns = ts_ns
    parent.mtime_ns = ts_ns

    self.db.execute(
        "INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)",
        (self._add_name(new_name), id_, new_id_p))

    target = self.inodes[id_]
    target.refcount += 1
    target.ctime_ns = ts_ns
    self.open_inodes[target.id] += 1

    return target.entry_attributes()
def setattr(self, id_, attr, fields, fh, ctx):
    """Handles FUSE setattr() requests

    Updates the attributes selected in *fields* on inode *id_*.  When the
    size changes, superfluous cache blocks are removed and the (new) last
    block is truncated.
    """
    inode = self.inodes[id_]
    if fh is not None:
        assert fh == id_
    timestamp = time.time()

    if self.failsafe or inode.locked:
        raise FUSEError(errno.EPERM)

    if fields.update_size:
        len_ = attr.st_size

        # Determine blocks to delete
        last_block = len_ // self.max_obj_size
        cutoff = len_ % self.max_obj_size
        total_blocks = int(math.ceil(inode.size / self.max_obj_size))

        # Adjust file size
        inode.size = len_

        # Delete blocks and truncate last one if required
        if cutoff == 0:
            self.cache.remove(id_, last_block, total_blocks)
        else:
            self.cache.remove(id_, last_block + 1, total_blocks)
            try:
                # BUG FIX: the cache handle was previously bound to *fh*,
                # shadowing the file-handle parameter; use a distinct name.
                with self.cache.get(id_, last_block) as cache_fh:
                    cache_fh.truncate(cutoff)
            except NoSuchObject as exc:
                log.warning('Backend lost block %d of inode %d (id %s)!',
                            last_block, id_, exc.key)
                # NOTE(review): this re-raises NoSuchObject rather than a
                # FUSEError — confirm that a caller translates it.
                raise
            except CorruptedObjectError as exc:
                log.warning('Backend returned malformed data for block %d of inode %d (%s)',
                            last_block, id_, exc)
                # Enter fail-safe mode and remember the bad block.
                self.failsafe = True
                self.broken_blocks[id_].add(last_block)
                raise FUSEError(errno.EIO)

    if fields.update_mode:
        inode.mode = attr.st_mode
    if fields.update_uid:
        inode.uid = attr.st_uid
    if fields.update_gid:
        inode.gid = attr.st_gid
    if fields.update_atime:
        inode.atime = attr.st_atime_ns / 1e9
    if fields.update_mtime:
        inode.mtime = attr.st_mtime_ns / 1e9

    # Any metadata change updates ctime.
    inode.ctime = timestamp
    return inode.entry_attributes()
def _create(self, id_p, name, mode, ctx, rdev=0, size=0):
    """Create a new inode named *name* in directory *id_p* and return it."""
    # The control file name is reserved.
    if name == CTRL_NAME:
        log.warning('Attempted to create s3ql control file at %s',
                    get_path(id_p, self.db, name))
        raise FUSEError(errno.EACCES)

    now = time.time()
    parent = self.inodes[id_p]

    if parent.locked:
        raise FUSEError(errno.EPERM)

    # Refuse to create entries below a directory that was already unlinked.
    if parent.refcount == 0:
        log.warning('Attempted to create entry %s with unlinked parent %d',
                    name, id_p)
        raise FUSEError(errno.EINVAL)

    parent.mtime = now
    parent.ctime = now

    try:
        inode = self.inodes.create_inode(
            mtime=now, ctime=now, atime=now,
            uid=ctx.uid, gid=ctx.gid, mode=mode,
            refcount=1, rdev=rdev, size=size)
    except OutOfInodesError:
        log.warning('Could not find a free inode')
        raise FUSEError(errno.ENOSPC)

    self.db.execute(
        "INSERT INTO contents(name_id, inode, parent_inode) VALUES(?,?,?)",
        (self._add_name(name), inode.id, id_p))

    return inode
def remove_tree(self, id_p0, name0):
    '''Remove directory tree

    Iteratively deletes the tree rooted at entry *name0* of directory
    *id_p0*, working in bounded chunks and periodically yielding the GIL.
    '''

    if self.failsafe:
        raise FUSEError(errno.EPERM)

    log.debug('started with %d, %s', id_p0, name0)

    if self.inodes[id_p0].locked:
        raise FUSEError(errno.EPERM)

    id0 = self._lookup(id_p0, name0, ctx=None).id
    queue = [id0]        # Directories that we still need to delete
    batch_size = 200     # Entries to process before releasing GIL
    stamp = time.time()  # Time of last GIL release
    while queue:  # For every directory
        id_p = queue.pop()
        is_open = id_p in self.open_inodes

        # Per https://sqlite.org/isolation.html, results of removing rows
        # during select are undefined. Therefore, process data in chunks.
        # This is also a nice opportunity to release the GIL...
        query_chunk = self.db.get_list(
            'SELECT name, name_id, inode FROM contents_v WHERE '
            'parent_inode=? LIMIT %d' % batch_size, (id_p,))
        reinserted = False
        for (name, name_id, id_) in query_chunk:
            if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?',
                               (id_,)):
                # First delete subdirectories
                if not reinserted:
                    queue.append(id_p)
                    reinserted = True
                queue.append(id_)
            else:
                if is_open:
                    llfuse.invalidate_entry(id_p, name)
                self._remove(id_p, name, id_, force=True)

        if query_chunk and not reinserted:
            # Make sure to re-insert the directory to process the remaining
            # contents and delete the directory itself.
            queue.append(id_p)

        dt = time.time() - stamp
        batch_size = int(batch_size * GIL_RELEASE_INTERVAL / dt)
        # BUG FIX: the min()/max() calls were swapped, which forced
        # batch_size to 20000 on every iteration.  Clamp to the intended
        # [200, 20000] range instead.
        batch_size = max(batch_size, 200)  # somewhat arbitrary...
        batch_size = min(batch_size, 20000)
        log.debug('Adjusting batch_size to %d and yielding', batch_size)
        llfuse.lock.yield_(100)
        log.debug('re-acquired lock')
        stamp = time.time()

    if id_p0 in self.open_inodes:
        log.debug('invalidate_entry(%d, %r)', id_p0, name0)
        llfuse.invalidate_entry(id_p0, name0)
    self._remove(id_p0, name0, id0, force=True)

    self.forget([(id0, 1)])
    log.debug('finished')
def lookup(self, parent_inode, name):
    """Look up *name* in the root directory of this (flat) filesystem."""
    # Only the root directory (inode 1) holds entries here.
    if parent_inode != 1:
        raise FUSEError(errno.ENOENT)
    try:
        grid_file = self.fs.get_last_version(filename=name.decode())
    except NoFile:
        raise FUSEError(errno.ENOENT)
    else:
        return grid2attrs(grid_file)
def setxattr(self, inode, name, value, ctx):
    """Treat writes to the root inode's 'command' xattr as control commands."""
    # Only the magic 'command' attribute on the root inode is supported.
    if inode != llfuse.ROOT_INODE or name != b'command':
        raise FUSEError(errno.ENOTSUP)

    if value == b'forget_entry':
        llfuse.invalidate_entry(llfuse.ROOT_INODE, self.hello_name)
        return
    if value == b'forget_inode':
        llfuse.invalidate_inode(self.hello_inode)
        return
    if value == b'store':
        llfuse.notify_store(self.hello_inode, offset=0, data=self.hello_data)
        return
    # Unknown command value.
    raise FUSEError(errno.EINVAL)
def mknod(self, id_p, name, mode, rdev, ctx):
    """Create a special (device) file and return the new inode."""
    log.debug('started with %d, %r', id_p, name)
    if self.failsafe:
        raise FUSEError(errno.EPERM)
    new_inode = self._create(id_p, name, mode, ctx, rdev=rdev)
    self.open_inodes[new_inode.id] += 1
    return new_inode
def getattr(self, inode):
    """Return llfuse.EntryAttributes built from a fresh lstat() of the
    path mapped to *inode*.

    Raises FUSEError with the underlying errno if the stat fails.
    """
    path = self.inode_path_map[inode]
    try:
        st = os.lstat(path)
    except OSError as exc:
        raise FUSEError(exc.errno)

    entry = llfuse.EntryAttributes()
    # BUG FIX: os.lstat() results expose lowercase st_* attributes; the
    # uppercase ST_* names are index constants from the *stat module* and
    # do not exist on a stat_result (AttributeError at runtime).
    entry.st_ino = st.st_ino
    entry.st_mode = st.st_mode
    entry.st_nlink = st.st_nlink
    entry.st_uid = st.st_uid
    entry.st_gid = st.st_gid
    # BUG FIX: st_rdev was populated from the containing device (ST_DEV);
    # the device ID of a special file is st_rdev.
    entry.st_rdev = st.st_rdev
    entry.st_size = st.st_size
    entry.st_atime = st.st_atime
    entry.st_mtime = st.st_mtime
    entry.st_ctime = st.st_ctime
    entry.generation = 0
    # Kernel may cache entry and attributes for 5 seconds.
    entry.entry_timeout = 5
    entry.attr_timeout = 5
    entry.st_blksize = 512
    entry.st_blocks = 1
    return entry
def mkdir(self, id_p, name, mode, ctx):
    """Create directory *name* in *id_p* and return the new inode."""
    log.debug('mkdir(%d, %r): start', id_p, name)
    if self.failsafe:
        raise FUSEError(errno.EPERM)
    new_inode = self._create(id_p, name, mode, ctx)
    self.open_inodes[new_inode.id] += 1
    return new_inode
def write(self, fh, offset, buf): '''Handle FUSE write requests. This method releases the global lock while it is running. ''' #log.debug('started with %d, %d, datalen=%d', fh, offset, len(buf)) if self.failsafe or self.inodes[fh].locked: raise FUSEError(errno.EPERM) total = len(buf) minsize = offset + total while buf: written = self._readwrite(fh, offset, buf=buf) offset += written buf = buf[written:] # Update file size if changed # Fuse does not ensure that we do not get concurrent write requests, # so we have to be careful not to undo a size extension made by # a concurrent write (because _readwrite() releases the global # lock). now_ns = time_ns() inode = self.inodes[fh] inode.size = max(inode.size, minsize) inode.mtime_ns = now_ns inode.ctime_ns = now_ns return total
def readlink(self, inode, ctx):
    """Return the target of the symbolic link *inode* as bytes."""
    link_path = self._inode_to_path(inode)
    try:
        target = os.readlink(link_path)
    except OSError as exc:
        raise FUSEError(exc.errno)
    return fsencode(target)
def open(self, id_, flags, ctx):
    """Open inode *id_*; the inode id itself serves as the file handle."""
    log.debug('started with %d', id_)
    # Writing is forbidden in failsafe mode and on locked inodes.
    wants_write = bool(flags & (os.O_RDWR | os.O_WRONLY))
    if wants_write and (self.failsafe or self.inodes[id_].locked):
        raise FUSEError(errno.EPERM)
    return id_
def rename(self, id_p_old, name_old, id_p_new, name_new, ctx):
    """Rename/move a directory entry, replacing the destination if present."""
    log.debug('started with %d, %r, %d, %r', id_p_old, name_old,
              id_p_new, name_new)

    # The control file may be neither source nor destination of a rename.
    if name_new == CTRL_NAME or name_old == CTRL_NAME:
        log.warning('Attempted to rename s3ql control file (%s -> %s)',
                    get_path(id_p_old, self.db, name_old),
                    get_path(id_p_new, self.db, name_new))
        raise llfuse.FUSEError(errno.EACCES)

    if (self.failsafe or self.inodes[id_p_old].locked
            or self.inodes[id_p_new].locked):
        raise FUSEError(errno.EPERM)

    inode_old = self._lookup(id_p_old, name_old, ctx)

    # Find out whether the destination name already exists.
    try:
        inode_new = self._lookup(id_p_new, name_new, ctx)
    except llfuse.FUSEError as exc:
        if exc.errno != errno.ENOENT:
            raise
        target_exists = False
    else:
        target_exists = True

    if target_exists:
        # Overwrite the existing destination entry.
        self._replace(id_p_old, name_old, id_p_new, name_new,
                      inode_old.id, inode_new.id)
        self.forget([(inode_old.id, 1), (inode_new.id, 1)])
    else:
        self._rename(id_p_old, name_old, id_p_new, name_new)
        self.forget([(inode_old.id, 1)])
def rmdir(self, id_p, name, ctx):
    """Remove directory *name* from directory inode *id_p*."""
    log.debug('started with %d, %r', id_p, name)
    if self.failsafe:
        raise FUSEError(errno.EPERM)
    victim = self._lookup(id_p, name, ctx)
    if self.inodes[id_p].locked:
        raise FUSEError(errno.EPERM)
    # rmdir() only applies to directories.
    if not stat.S_ISDIR(victim.mode):
        raise llfuse.FUSEError(errno.ENOTDIR)
    self._remove(id_p, name, victim.id)
    self.forget([(victim.id, 1)])
def _send_receive(self, msg):
    """Send one JSON-encoded message to the daemon and return the decoded
    'result' field of its reply.

    Raises FUSEError with the first element of the reply's 'error' field
    if the daemon reports a failure.
    """
    request = encode_json(msg) + '\n'
    self.daemon.stdin.write(bytes(request, 'utf-8'))
    self.daemon.stdin.flush()
    reply = decode_json(str(self.daemon.stdout.readline(), 'utf-8'))
    if 'error' in reply:
        raise FUSEError(reply['error'][0])
    return reply['result']
def mkdir(self, id_p, name, mode, ctx):
    """Create directory *name* in *id_p* and return its entry attributes."""
    log.debug('started with %d, %r', id_p, name)
    if self.failsafe:
        raise FUSEError(errno.EPERM)
    new_inode = self._create(id_p, name, mode, ctx)
    self.open_inodes[new_inode.id] += 1
    return new_inode.entry_attributes()
def readlink(self, inode):
    """Return the target of symbolic link *inode* as bytes."""
    link_path = self.inode_path_map[inode]
    try:
        target = os.readlink(link_path)
    except OSError as exc:
        raise FUSEError(exc.errno)
    return str2bytes(target)
def _create(self, inode_p, name, mode, ctx, rdev=0, target=None):
    """Create a new entry via the connection manager and return its
    attributes.

    Raises FUSEError(EINVAL) if the parent directory has already been
    unlinked.
    """
    if self.getattr(inode_p).st_nlink == 0:
        # BUG FIX: logging's warn() is a deprecated alias; use warning().
        log.warning('Attempted to create entry %s with unlinked parent %d',
                    name, inode_p)
        raise FUSEError(errno.EINVAL)

    inode = self.cm._create(inode_p, name, ctx, mode, rdev, target)
    return self.getattr(inode)
def rmdir(self, inode_p, name):
    """Remove directory *name* from the directory mapped to *inode_p*."""
    decoded = bytes2str(name)
    parent_path = self.inode_path_map[inode_p]
    try:
        os.rmdir(os.path.join(parent_path, decoded))
    except OSError as exc:
        raise FUSEError(exc.errno)
def link(self, inode, new_inode_p, new_name, ctx):
    """Handle a link request and return the attributes of *inode*.

    NOTE(review): no directory entry is actually created here — presumably
    handled elsewhere (or intentionally a stub); confirm against callers.
    """
    log.debug("link")
    entry_p = self.getattr(new_inode_p)
    if entry_p.st_nlink == 0:
        # BUG FIX: logging's warn() is a deprecated alias; use warning().
        log.warning('Attempted to create entry %s with unlinked parent %d',
                    new_name, new_inode_p)
        raise FUSEError(errno.EINVAL)
    return self.getattr(inode)
def _remove(self, id_p, name, id_, force=False):
    '''Remove entry `name` with parent inode `id_p`

    `id_` must be the inode of `name`. If `force` is True, then the
    `locked` attribute is ignored.

    This method releases the global lock.
    '''

    log.debug('started with %d, %r', id_p, name)
    now_ns = time_ns()

    # Check that there are no child entries
    if self.db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_,)):
        log.debug("Attempted to remove entry with children: %s",
                  get_path(id_p, self.db, name))
        raise llfuse.FUSEError(errno.ENOTEMPTY)

    if self.inodes[id_p].locked and not force:
        raise FUSEError(errno.EPERM)

    # Drop the directory entry and its name refcount.
    name_id = self._del_name(name)
    self.db.execute("DELETE FROM contents WHERE name_id=? AND parent_inode=?",
                    (name_id, id_p))

    inode = self.inodes[id_]
    inode.refcount -= 1
    inode.ctime_ns = now_ns

    # Removing an entry modifies the parent directory.
    inode_p = self.inodes[id_p]
    inode_p.mtime_ns = now_ns
    inode_p.ctime_ns = now_ns

    if inode.refcount == 0 and id_ not in self.open_inodes:
        log.debug('removing from cache')
        self.cache.remove(id_, 0, int(math.ceil(inode.size / self.max_obj_size)))
        # Since the inode is not open, it's not possible that new blocks
        # get created at this point and we can safely delete the inode

        # Release the extended-attribute name refcounts, then delete names
        # that are no longer referenced and the inode's remaining rows.
        self.db.execute('UPDATE names SET refcount = refcount - 1 WHERE '
                        'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
                        (id_,))
        self.db.execute('DELETE FROM names WHERE refcount=0 AND '
                        'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
                        (id_,))
        self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_,))
        self.db.execute('DELETE FROM symlink_targets WHERE inode=?', (id_,))
        del self.inodes[id_]

    log.debug('finished')
def setattr(self, inode, attr, fields, fh, ctx):
    """Apply the attribute changes selected in *fields* to the underlying
    file, working through either a path or an open file descriptor.
    """
    # We use the f* functions if possible so that we can handle
    # a setattr() call for an inode without associated directory
    # handle.
    if fh is None:
        path_or_fh = self._inode_to_path(inode)
        truncate = os.truncate
        chmod = os.chmod
        chown = os.chown
        stat = os.lstat
    else:
        path_or_fh = fh
        truncate = os.ftruncate
        chmod = os.fchmod
        chown = os.fchown
        stat = os.fstat

    try:
        if fields.update_size:
            truncate(path_or_fh, attr.st_size)

        if fields.update_mode:
            # Under Linux, chmod always resolves symlinks so we should
            # actually never get a setattr() request for a symbolic
            # link.
            assert not stat_m.S_ISLNK(attr.st_mode)
            chmod(path_or_fh, stat_m.S_IMODE(attr.st_mode))

        if fields.update_uid:
            # NOTE(review): os.fchown() takes no *follow_symlinks* keyword;
            # this call presumably fails when *fh* is not None — confirm.
            chown(path_or_fh, attr.st_uid, -1, follow_symlinks=False)

        if fields.update_gid:
            chown(path_or_fh, -1, attr.st_gid, follow_symlinks=False)

        if fields.update_atime and fields.update_mtime:
            # utime accepts both paths and file descriptors
            os.utime(path_or_fh, None, follow_symlinks=False,
                     ns=(attr.st_atime_ns, attr.st_mtime_ns))
        elif fields.update_atime or fields.update_mtime:
            # We can only set both values, so we first need to retrieve the
            # one that we shouldn't be changing.
            oldstat = stat(path_or_fh)
            if not fields.update_atime:
                attr.st_atime_ns = oldstat.st_atime_ns
            else:
                attr.st_mtime_ns = oldstat.st_mtime_ns
            os.utime(path_or_fh, None, follow_symlinks=False,
                     ns=(attr.st_atime_ns, attr.st_mtime_ns))

    except OSError as exc:
        raise FUSEError(exc.errno)

    return self.getattr(inode)
def _inode_to_path(self, inode): try: val = self._inode_path_map[inode] except KeyError: raise FUSEError(errno.ENOENT) if isinstance(val, set): # In case of hardlinks, pick any path val = next(iter(val)) return val
def statfs(self, ctx):
    """Report filesystem statistics from the volume backing the root."""
    result = llfuse.StatvfsData()
    try:
        vfs = os.statvfs(self._inode_path_map[llfuse.ROOT_INODE])
    except OSError as exc:
        raise FUSEError(exc.errno)
    # Copy the relevant statvfs fields one-to-one.
    for field in ('f_bsize', 'f_frsize', 'f_blocks', 'f_bfree',
                  'f_bavail', 'f_files', 'f_ffree', 'f_favail'):
        setattr(result, field, getattr(vfs, field))
    return result
def mkdir(self, inode_p, name, mode, ctx):
    """Create a directory, applying the caller's umask and ownership."""
    new_path = os.path.join(self._inode_to_path(inode_p), fsdecode(name))
    try:
        os.mkdir(new_path, mode=(mode & ~ctx.umask))
        os.chown(new_path, ctx.uid, ctx.gid)
    except OSError as exc:
        raise FUSEError(exc.errno)
    attr = self._getattr(path=new_path)
    self._add_path(attr.st_ino, new_path)
    return attr
def link(self, inode, new_inode_p, new_name, ctx):
    """Create a hard link to *inode* named *new_name* under *new_inode_p*."""
    decoded = fsdecode(new_name)
    parent_path = self._inode_to_path(new_inode_p)
    link_path = os.path.join(parent_path, decoded)
    try:
        os.link(self._inode_to_path(inode), link_path, follow_symlinks=False)
    except OSError as exc:
        raise FUSEError(exc.errno)
    self._add_path(inode, link_path)
    return self.getattr(inode)
def _convert_error_to_fuse_error(action, thing):
    # Generator helper — presumably wrapped with @contextmanager at its
    # definition site (decorator not visible here); confirm.  Runs the
    # wrapped body and translates unexpected exceptions into
    # FUSEError(EAGAIN), logging what was being attempted.
    try:
        yield
    except Exception as e:
        # FUSE errors are already in the right form; re-raise unchanged.
        if isinstance(e, FUSEError):
            raise e
        logging.error('Something went wrong when %s %s: %s', action, thing, e)
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            # DEBUG logging, print stacktrace
            traceback.print_exc()
        raise FUSEError(errno.EAGAIN)
def link(self, inode, new_inode_p, new_name):
    """Create a directory entry linking *inode* under *new_inode_p* and
    return the inode's attributes.

    Raises FUSEError(EINVAL) if the parent has already been unlinked.
    """
    entry_p = self.getattr(new_inode_p)
    if entry_p.st_nlink == 0:
        # BUG FIX: logging's warn() is a deprecated alias; use warning().
        log.warning('Attempted to create entry %s with unlinked parent %d',
                    new_name, new_inode_p)
        raise FUSEError(errno.EINVAL)

    self.cursor.execute(
        "INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)",
        (new_name, inode, new_inode_p))
    return self.getattr(inode)
def rmdir(self, inode_p, name, ctx):
    """Remove directory *name* and drop its path from the lookup cache."""
    decoded = fsdecode(name)
    dir_path = os.path.join(self._inode_to_path(inode_p), decoded)
    try:
        victim_inode = os.lstat(dir_path).st_ino
        os.rmdir(dir_path)
    except OSError as exc:
        raise FUSEError(exc.errno)
    # Forget the path only if the kernel still holds lookups on it.
    if victim_inode in self._lookup_cnt:
        self._forget_path(victim_inode, dir_path)
def readlink(self, id_, ctx):
    """Return the symlink target stored in the database for inode *id_*."""
    log.debug('started with %d', id_)
    ts_ns = time_ns()
    inode = self.inodes[id_]
    # Update atime only when it lags behind ctime or mtime (relatime-like).
    if inode.atime_ns < inode.ctime_ns or inode.atime_ns < inode.mtime_ns:
        inode.atime_ns = ts_ns
    try:
        return self.db.get_val(
            "SELECT target FROM symlink_targets WHERE inode=?", (id_,))
    except NoSuchRowError:
        log.warning('Inode does not have symlink target: %d', id_)
        raise FUSEError(errno.EINVAL)