def statfs(self, ctx):
    """Report file-system statistics derived from the superblock."""
    logger.debug("STATFS called")
    result = llfuse.StatvfsData()
    # Block size and fragment size are the same for this file system.
    result.f_bsize = self._super_block.block_size
    result.f_frsize = result.f_bsize
    total = self._super_block.total_size
    used = self._super_block.used_size
    # Block counts, expressed in f_frsize units.
    result.f_blocks = total // result.f_frsize
    result.f_bfree = (total - used) // result.f_frsize
    # Unprivileged processes see the same free space as root.
    result.f_bavail = result.f_bfree
    # Inode counts: total capacity minus inodes already allocated.
    result.f_files = self._super_block.max_inodes
    result.f_ffree = result.f_files - self._super_block.inode_counter
    result.f_favail = result.f_ffree
    return result
def statfs(self, ctx):
    """Return file-system statistics based on backing-storage usage.

    Block counts are reported in f_frsize units. Inode counts are not
    tracked by this file system, so those fields keep their zero
    defaults (see the statvfs(3) field list below).
    """
    # from man page:
    #  fsblkcnt_t f_bavail;  /* # free blocks for unprivileged users */
    #  fsblkcnt_t f_bfree;   /* # free blocks */
    #  fsblkcnt_t f_blocks;  /* size of fs in f_frsize units */
    #  unsigned long f_bsize;  /* file system block size */
    #  fsfilcnt_t f_favail;  /* # free inodes for unprivileged users */
    #  fsfilcnt_t f_ffree;   /* # free inodes */
    #  fsfilcnt_t f_files;   /* # inodes */
    #  unsigned long f_frsize; /* fragment size */
    # f_ffree / f_files are not applicable here and stay 0.
    assert self._initialized
    d = llfuse.StatvfsData()
    # Both block size and fragment size are the storage block-size limit.
    d.f_bsize = const.BLOCK_SIZE_LIMIT
    d.f_frsize = const.BLOCK_SIZE_LIMIT
    avail = self.forest.storage.get_bytes_available()
    used = self.forest.storage.get_bytes_used()
    # BUG FIX: use floor division — statvfs fields must be integers;
    # '/' produced floats under Python 3.
    d.f_bfree = avail // d.f_frsize
    d.f_blocks = (avail + used) // d.f_frsize
    # Unprivileged users have access to all resources.
    d.f_bavail = d.f_bfree
    d.f_favail = d.f_ffree  # f_ffree keeps its default (0)
    return d
def __init__(self, manifest, datastore):
    """Build inode/entry caches for every node in *manifest*."""
    self.manifest = manifest
    self.datastore = datastore
    # Inode numbers start at 1, so index 0 is a placeholder.
    self.nodecache = [None]
    self.entrycache = [None]
    self.hardlinks = dict()
    self.filecache = defaultdict(int)
    self.inode_open_count = defaultdict(int)
    # Pre-computed statvfs answer; only the block/fragment sizes carry
    # real values, the counts are reported as zero.
    self.fstat = llfuse.StatvfsData()
    blksize = int(self.manifest.root.stats.st_blksize)
    self.fstat.f_bsize = blksize   # optimal transfer block size
    self.fstat.f_frsize = blksize  # fragment size
    for field in ('f_blocks', 'f_bfree', 'f_bavail',
                  'f_files', 'f_ffree', 'f_favail'):
        setattr(self.fstat, field, 0)
    self.highest_inode = 1
    self.genEntryAndInode(manifest.root)
    for node in manifest:
        self.genEntryAndInode(node)
def statfs(self, ctx):
    """Proxy statvfs(2) of the file system backing the root inode.

    Raises FUSEError with the underlying errno when statvfs fails.
    """
    result = llfuse.StatvfsData()
    try:
        backing = os.statvfs(self._inode_path_map[llfuse.ROOT_INODE])
    except OSError as exc:
        raise FUSEError(exc.errno)
    # Copy the standard statvfs fields onto the llfuse struct.
    result.f_bsize = backing.f_bsize
    result.f_frsize = backing.f_frsize
    result.f_blocks = backing.f_blocks
    result.f_bfree = backing.f_bfree
    result.f_bavail = backing.f_bavail
    result.f_files = backing.f_files
    result.f_ffree = backing.f_ffree
    result.f_favail = backing.f_favail
    return result
def statfs(self):
    """Return all-zero dummy statistics with a 512-byte block size."""
    data = llfuse.StatvfsData()
    data.f_bsize = 512
    data.f_frsize = 512
    # No real capacity is tracked; report everything as zero.
    for field in ('f_blocks', 'f_bfree', 'f_bavail',
                  'f_files', 'f_ffree', 'f_favail'):
        setattr(data, field, 0)
    return data
def statfs(self, ctx=None):
    """Return all-zero dummy statistics plus a name-length limit."""
    data = llfuse.StatvfsData()
    data.f_bsize = 512
    data.f_frsize = 512
    # No real capacity is tracked; report everything as zero.
    for field in ('f_blocks', 'f_bfree', 'f_bavail',
                  'f_files', 'f_ffree', 'f_favail'):
        setattr(data, field, 0)
    # == NAME_MAX (depends on archive source OS / FS)
    data.f_namemax = 255
    return data
def statfs(self):
    """Report block usage from the total size of all entries.

    The view is read-only, so no free blocks or inodes are advertised.
    """
    result = llfuse.StatvfsData()
    result.f_bsize = 512
    result.f_frsize = 512
    total = sum(int(entry.describe['size']) for entry in self.entries)
    result.f_blocks = total // result.f_frsize
    result.f_bfree = 0
    result.f_bavail = 0
    result.f_files = len(self.entries)
    result.f_ffree = 0
    result.f_favail = 0
    return result
def statfs(self, ctx):
    """Return statistics with free counts derived from size/inode totals.

    NOTE(review): ``size`` and ``inodes`` are not defined in this method
    and are presumably module-level globals — confirm, since a NameError
    results otherwise. f_blocks/f_files are deliberately left unset.
    """
    log.debug("statfs")
    result = llfuse.StatvfsData()
    result.f_bsize = 512
    result.f_frsize = 512
    # Advertise at least 1024 free blocks and 100 free inodes.
    result.f_bfree = max(size // result.f_frsize, 1024)
    result.f_bavail = result.f_bfree
    result.f_ffree = max(inodes, 100)
    result.f_favail = result.f_ffree
    return result
def test_copy():
    """copy() must reject unpicklable objects and preserve attributes."""
    # These llfuse objects are deliberately not copyable.
    uncopyable = (llfuse.SetattrFields(), llfuse.RequestContext(),
                  llfuse.lock, llfuse.lock_released)
    for obj in uncopyable:
        pytest.raises(PicklingError, copy, obj)
    # Attribute-carrying structs must round-trip through copy().
    cases = ((llfuse.EntryAttributes(), 'st_mode'),
             (llfuse.StatvfsData(), 'f_files'))
    for instance, attr in cases:
        setattr(instance, attr, 42)
        duplicate = copy(instance)
        assert getattr(instance, attr) == getattr(duplicate, attr)
    err = llfuse.FUSEError(10)
    assert err.errno == copy(err).errno
def statfs(self, ctx=None):
    """Dummy statistics: 128 KiB block size, zero for everything else.

    NOTE(review): f_frsize is reported as 0 while f_bsize is 128 KiB;
    some tools divide by f_frsize — confirm this is intentional.
    """
    result = llfuse.StatvfsData()
    result.f_bsize = 128 * 1024
    for field in ('f_blocks', 'f_files', 'f_bfree',
                  'f_bavail', 'f_ffree', 'f_favail', 'f_frsize'):
        setattr(result, field, 0)
    return result
def statfs(self, ctx):
    """Derive statistics from the archive file's own stat() data."""
    result = llfuse.StatvfsData()
    backing = self.arch_st
    result.f_bsize = backing.st_blksize
    result.f_frsize = backing.st_blksize
    result.f_blocks = backing.st_blocks
    # A read-only archive exposes no free space or free inodes.
    result.f_bfree = 0
    result.f_bavail = 0
    result.f_files = len(self.inodes)
    result.f_ffree = 0
    result.f_favail = 0
    return result
def statfs(self, ctx):
    """Return dummy (all-zero) statistics with a 512-byte block size."""
    result = llfuse.StatvfsData()
    result.f_bsize = 512
    result.f_frsize = 512
    # We use dummy values here — no real usage is tracked.
    for field in ('f_blocks', 'f_bfree', 'f_bavail',
                  'f_files', 'f_ffree', 'f_favail'):
        setattr(result, field, 0)
    return result
def statfs(self, ctx: llfuse.RequestContext) -> llfuse.StatvfsData:  # {{{
    # pylint: disable=unused-argument
    """Make the output of df nicer (see man 2 statvfs)."""
    result = llfuse.StatvfsData()
    # Total size counted in 1-byte fragments; nothing free/available.
    result.f_bsize = 4096
    result.f_frsize = 1
    result.f_blocks = self.whole_size
    result.f_bfree = 0
    result.f_bavail = 0
    result.f_files = self.max_inode
    result.f_ffree = 0
    result.f_favail = 0
    return result
def statfs(self, ctx):
    """Make the output of df nicer (see man 2 statvfs)."""
    result = llfuse.StatvfsData()
    # Total size counted in 1-byte fragments; nothing free/available.
    result.f_bsize = 4096
    result.f_frsize = 1
    result.f_blocks = self.whole_size
    result.f_bfree = 0
    result.f_bavail = 0
    result.f_files = self.max_inode
    result.f_ffree = 0
    result.f_favail = 0
    return result
def statfs(self):
    """Compute usage from the in-memory entry table and FS constants.

    NOTE(review): all_entries, FS_SIZE and MAX_INODES are assumed to be
    module-level globals — confirm against the rest of the module.
    """
    result = llfuse.StatvfsData()
    result.f_bsize = 512
    result.f_frsize = 512
    consumed = sum(len(entry.data) for entry in all_entries.values())
    result.f_blocks = FS_SIZE // result.f_bsize
    result.f_bfree = (FS_SIZE - consumed) // result.f_bsize
    result.f_bavail = result.f_bfree
    result.f_files = MAX_INODES
    result.f_ffree = MAX_INODES - len(all_entries)
    result.f_favail = result.f_ffree
    return result
def statfs(self):
    """Fetch statvfs data for our root path from the remote peer."""
    log.debug('statfs')
    result = llfuse.StatvfsData()
    # The remote side answers with an os.statvfs-like structure.
    remote = self.send_command_and_receive_response(("statvfs", self.root))
    for field in ('f_bsize', 'f_frsize', 'f_blocks', 'f_bfree',
                  'f_bavail', 'f_files', 'f_ffree', 'f_favail'):
        setattr(result, field, getattr(remote, field))
    return result
def statfs(self):
    """Return fixed statistics from hard-coded size and inode counts."""
    result = llfuse.StatvfsData()
    result.f_bsize = 512
    result.f_frsize = 512
    total_bytes = 9000
    result.f_blocks = total_bytes // result.f_frsize
    # Advertise at least 1024 free blocks.
    result.f_bfree = max(total_bytes // result.f_frsize, 1024)
    result.f_bavail = result.f_bfree
    inode_count = 9000
    result.f_files = inode_count
    # Advertise at least 100 free inodes.
    result.f_ffree = max(inode_count, 100)
    result.f_favail = result.f_ffree
    return result
def statfs(self):
    """Return zero-capacity statistics with a fixed inode pool."""
    result = llfuse.StatvfsData()
    free_bytes = 0
    total_bytes = 0
    result.f_bsize = 512
    result.f_frsize = 512
    result.f_blocks = total_bytes // result.f_frsize
    result.f_bfree = free_bytes // result.f_frsize
    result.f_bavail = result.f_bfree
    # One fixed pool of 10000 inodes, all reported free/available.
    result.f_files = 10000
    result.f_ffree = 10000
    result.f_favail = 10000
    return result
def statfs(self):
    """Build statistics from the ``inodes`` table in the database.

    Returns an llfuse.StatvfsData populated from SUM(size) and
    COUNT(id) over the inodes table; free counts are floored at
    1024 blocks / 100 inodes.
    """
    stat_ = llfuse.StatvfsData()
    stat_.f_bsize = 512
    stat_.f_frsize = 512
    # BUG FIX: SQL SUM(size) yields NULL (None) when the table is
    # empty, which crashed the integer division below — default to 0.
    size = self.get_row('SELECT SUM(size) FROM inodes')[0] or 0
    stat_.f_blocks = size // stat_.f_frsize
    # Advertise at least 1024 free blocks.
    stat_.f_bfree = max(size // stat_.f_frsize, 1024)
    stat_.f_bavail = stat_.f_bfree
    inodes = self.get_row('SELECT COUNT(id) FROM inodes')[0]
    stat_.f_files = inodes
    # Advertise at least 100 free inodes.
    stat_.f_ffree = max(inodes, 100)
    stat_.f_favail = stat_.f_ffree
    return stat_
def statfs(self, ctx):
    ''' Implement statfs(2).

        http://www.rath.org/llfuse-docs/operations.html#llfuse.Operations.statfs

        Currently bodges by reporting on the filesystem containing the
        current working directory; it should really report on the
        filesystem holding the Store, which requires a Store.statfs
        method of some kind (TODO).
    '''
    # TODO: get free space from the current Store
    # implies adding some kind of method to stores?
    backing = os.statvfs(".")
    result = llfuse.StatvfsData()
    result.f_bsize = backing.f_bsize
    result.f_frsize = backing.f_frsize
    result.f_blocks = backing.f_blocks
    result.f_bfree = backing.f_bfree
    result.f_bavail = backing.f_bavail
    result.f_files = backing.f_files
    result.f_ffree = backing.f_ffree
    result.f_favail = backing.f_favail
    return result
def statfs(self, ctx):
    """Report synthetic statistics for an effectively unlimited backend.

    Sizes come from the database; since the backend has no real
    capacity limit, the file system is always presented as (at most)
    half full and at least 1 TB large.
    """
    log.debug('started')
    stat_ = llfuse.StatvfsData()
    # Current number of stored objects / inodes and total stored bytes.
    blocks = self.db.get_val("SELECT COUNT(id) FROM objects")
    inodes = self.db.get_val("SELECT COUNT(id) FROM inodes")
    size = self.db.get_val('SELECT SUM(size) FROM blocks')
    if size is None:
        size = 0
    # Minimum allocation unit: that concept doesn't make much sense for
    # S3QL, so report the average stored-block size (at least 4096).
    if blocks != 0:
        stat_.f_frsize = max(4096, size // blocks)
    else:
        stat_.f_frsize = 4096
    # f_bsize should be the preferred IO block size, but `df` wrongly
    # interprets f_blocks/f_bfree/f_bavail in f_bsize units instead of
    # f_frsize (statvfs(3)); keeping the two equal is the only way to
    # return correct values *and* have df print something sensible
    # (cf. http://bugs.debian.org/671490).
    stat_.f_bsize = stat_.f_frsize
    # Backend is "unlimited": pretend a half-full FS of at least 1 TB.
    fs_size = max(2 * size, 1024**4)
    stat_.f_blocks = fs_size // stat_.f_frsize
    stat_.f_bfree = (fs_size - size) // stat_.f_frsize
    stat_.f_bavail = stat_.f_bfree  # free for non-root
    # Same trick for inodes: at least one million, at most half used.
    total_inodes = max(2 * inodes, 1000000)
    stat_.f_files = total_inodes
    stat_.f_ffree = total_inodes - inodes
    stat_.f_favail = total_inodes - inodes  # free for non-root
    return stat_
def statfs(self, ctx):
    """Report only block sizes (system page size) and max name length."""
    data = llfuse.StatvfsData()
    page_size = os.sysconf("SC_PAGE_SIZE")
    data.f_bsize = page_size
    data.f_frsize = page_size
    data.f_namemax = 255
    return data
def statfs(self, ctx=None):
    """Return default (all-zero) file-system statistics."""
    result = llfuse.StatvfsData()
    return result
def statfs(self, ctx):
    """Return default (all-zero) file-system statistics."""
    return llfuse.StatvfsData()