def merge(self, items, fn, same, new_entry):
    '''Helper method for updating the contents of the directory.

    items: array with new directory contents

    fn: function to take an entry in 'items' and return the desired file or
    directory name

    same: function to compare an existing entry with an entry in the items
    list to determine whether to keep the existing entry.

    new_entry: function to create a new directory entry from array entry.
    '''
    previous = self._entries
    self._entries = {}
    for item in items:
        name = fn(item)
        if name in previous and same(previous[name], item):
            # Unchanged: carry the existing entry over to the new contents.
            self._entries[name] = previous.pop(name)
        else:
            # New or changed: create a fresh entry for it.
            self._entries[name] = self.inodes.add_entry(new_entry(item))
    # Whatever is left in 'previous' no longer exists in the directory;
    # tell the kernel to drop its cached dentries and release the inodes.
    for name in previous:
        llfuse.invalidate_entry(self.inode, str(name))
        self.inodes.del_entry(previous[name])
    self.fresh()
def remove_tree(self, id_p0, name0):
    '''Remove directory tree rooted at entry *name0* in directory *id_p0*.

    Walks the tree iteratively (explicit queue, depth-first), deleting leaf
    entries first and periodically yielding the GIL so other threads can
    make progress.  Raises FUSEError(EPERM) in failsafe mode or when the
    parent inode is locked.
    '''
    if self.failsafe:
        raise FUSEError(errno.EPERM)

    log.debug('started with %d, %s', id_p0, name0)
    if self.inodes[id_p0].locked:
        raise FUSEError(errno.EPERM)

    id0 = self._lookup(id_p0, name0, ctx=None).id
    queue = [id0]  # Directories that we still need to delete
    batch_size = 200  # Entries to process before releasing GIL
    stamp = time.time()  # Time of last GIL release
    while queue:  # For every directory
        id_p = queue.pop()
        is_open = id_p in self.open_inodes

        # Per https://sqlite.org/isolation.html, results of removing rows
        # during select are undefined. Therefore, process data in chunks.
        # This is also a nice opportunity to release the GIL...
        query_chunk = self.db.get_list(
            'SELECT name, name_id, inode FROM contents_v WHERE '
            'parent_inode=? LIMIT %d' % batch_size, (id_p,))
        reinserted = False
        for (name, name_id, id_) in query_chunk:
            if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
                # First delete subdirectories
                if not reinserted:
                    queue.append(id_p)
                    reinserted = True
                queue.append(id_)
            else:
                if is_open:
                    llfuse.invalidate_entry(id_p, name)
                self._remove(id_p, name, id_, force=True)

        if query_chunk and not reinserted:
            # Make sure to re-insert the directory to process the remaining
            # contents and delete the directory itself.
            queue.append(id_p)

        dt = time.time() - stamp
        batch_size = int(batch_size * GIL_RELEASE_INTERVAL / dt)
        # BUG FIX: the original clamped with min(batch_size, 200) followed by
        # max(batch_size, 20000), which forces batch_size to 20000 on every
        # iteration.  The intent is to clamp to the range [200, 20000].
        batch_size = max(batch_size, 200)  # somewhat arbitrary lower bound
        batch_size = min(batch_size, 20000)  # upper bound
        log.debug('Adjusting batch_size to %d and yielding', batch_size)
        llfuse.lock.yield_(100)
        log.debug('re-acquired lock')
        stamp = time.time()

    if id_p0 in self.open_inodes:
        log.debug('invalidate_entry(%d, %r)', id_p0, name0)
        llfuse.invalidate_entry(id_p0, name0)
    self._remove(id_p0, name0, id0, force=True)

    self.forget([(id0, 1)])
    log.debug('finished')
def clear(self):
    '''Delete all entries'''
    oldentries = self._entries
    self._entries = {}
    for n in oldentries:
        # BUG FIX: 'n' is the entry *name* (dict key), not the entry object,
        # so the original `isinstance(n, Directory)` never matched and
        # subdirectories were never recursively cleared.  Test the entry
        # object itself instead.
        ent = oldentries[n]
        if isinstance(ent, Directory):
            ent.clear()
        llfuse.invalidate_entry(self.inode, str(n))
        self.inodes.del_entry(ent)
    self.invalidate()
def readdir(self, inode, off):
    '''Yield (name, attrs, next_offset) for every entry in *inode*.

    Only a single pass is supported: any non-zero offset terminates the
    listing immediately.
    '''
    if off != 0:
        # BUG FIX: `raise StopIteration` inside a generator is converted to
        # RuntimeError under PEP 479 (Python 3.7+).  Use a bare return to
        # end the generator instead (this was flagged `# fixme` already).
        return
    off = -1
    for name, cinode in self._lookup[inode].items():
        if inode == Inode.new:
            # we tell lies in create(), which the kernel caches
            llfuse.invalidate_entry(inode, name)
        yield (name, self.getattr(cinode), -1)
def setxattr(self, inode, name, value, ctx):
    '''Interpret writes to the root inode's 'command' xattr as control
    commands for exercising the kernel notification APIs.'''
    # Only the 'command' attribute on the root inode is supported.
    if inode != llfuse.ROOT_INODE or name != b'command':
        raise FUSEError(errno.ENOTSUP)

    if value == b'forget_entry':
        llfuse.invalidate_entry(llfuse.ROOT_INODE, self.hello_name)
        return
    if value == b'forget_inode':
        llfuse.invalidate_inode(self.hello_inode)
        return
    if value == b'store':
        llfuse.notify_store(self.hello_inode, offset=0, data=self.hello_data)
        return
    # Unknown command value.
    raise FUSEError(errno.EINVAL)
def remove_tree(self, id_p0, name0):
    """Remove directory tree

    Iteratively deletes the tree rooted at entry *name0* of directory
    *id_p0*, periodically yielding the GIL.  Raises FUSEError(EPERM) when
    the parent inode is locked.
    """

    log.debug("remove_tree(%d, %s): start", id_p0, name0)

    if self.inodes[id_p0].locked:
        raise FUSEError(errno.EPERM)

    id0 = self.lookup(id_p0, name0).id
    queue = [id0]  # Directories that still need to be deleted
    processed = 0  # Number of steps since last GIL release
    stamp = time.time()  # Time of last GIL release
    gil_step = 50  # Approx. number of steps between GIL releases
    while True:
        found_subdirs = False  # Did the current directory contain subdirectories?
        id_p = queue.pop()
        for (name_id, id_) in self.db.query("SELECT name_id, inode FROM contents WHERE "
                                            "parent_inode=?", (id_p,)):

            if self.db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_,)):
                # Entry is itself a non-empty directory: re-queue the parent
                # (once) so it is revisited after its children are gone, and
                # queue the child for deletion.
                if not found_subdirs:
                    found_subdirs = True
                    queue.append(id_p)
                queue.append(id_)
            else:
                # Leaf entry: invalidate the kernel dentry and remove it.
                name = self.db.get_val("SELECT name FROM names WHERE id=?", (name_id,))
                llfuse.invalidate_entry(id_p, name)
                self._remove(id_p, name, id_, force=True)

            processed += 1
            if processed > gil_step:
                # Time to yield: make sure the current directory is re-queued
                # so its remaining contents are processed later, then abandon
                # the (now possibly invalid) query cursor.
                if not found_subdirs:
                    found_subdirs = True
                    queue.append(id_p)
                break

        if not queue:
            # Whole tree is empty: remove the root entry itself and stop.
            llfuse.invalidate_entry(id_p0, name0)
            self._remove(id_p0, name0, id0, force=True)
            break

        if processed > gil_step:
            # Adapt gil_step so that roughly GIL_RELEASE_INTERVAL seconds
            # elapse between yields, then release the GIL.
            dt = time.time() - stamp
            gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
            log.debug("remove_tree(%d, %s): Adjusting gil_step to %d",
                      id_p0, name0, gil_step)
            processed = 0
            llfuse.lock.yield_()
            stamp = time.time()

    log.debug("remove_tree(%d, %s): end", id_p0, name0)
def merge(self, items, fn, same, new_entry):
    '''Helper method for updating the contents of the directory.

    Takes a list describing the new contents of the directory, reuse
    entries that are the same in both the old and new lists, create new
    entries, and delete old entries missing from the new list.

    items: iterable with new directory contents

    fn: function to take an entry in 'items' and return the desired file or
    directory name, or None if this entry should be skipped

    same: function to compare an existing entry (a File or Directory
    object) with an entry in the items list to determine whether to keep
    the existing entry.

    new_entry: function to create a new directory entry (File or Directory
    object) from an entry in the items list.

    '''
    previous = self._entries
    self._entries = {}
    dirty = False

    for item in items:
        name = sanitize_filename(fn(item))
        if not name:
            # Skipped entry (fn returned None / empty after sanitizing).
            continue
        if name in previous and same(previous[name], item):
            # Unchanged: move the existing directory entry over.
            self._entries[name] = previous.pop(name)
        else:
            # Create a new directory entry.
            ent = new_entry(item)
            if ent is not None:
                self._entries[name] = self.inodes.add_entry(ent)
                dirty = True

    # Delete any other directory entries that were not found in 'items'.
    for leftover in previous:
        llfuse.invalidate_entry(self.inode, str(leftover))
        self.inodes.del_entry(previous[leftover])
        dirty = True

    if dirty:
        self._mtime = time.time()

    self.fresh()
def invalidate_entry(self, entry, name):
    '''Ask the kernel to drop its cached dentry for *name* under *entry*.'''
    if not entry.has_ref(False):
        # The kernel never did a lookup on this inode (or has already
        # forgotten about it), so there is nothing to invalidate.
        return
    encoded = native(name.encode(self.encoding))
    llfuse.invalidate_entry(entry.inode, encoded)
def invalidate_entry(self, inode, name):
    '''Invalidate the kernel's cached dentry for *name* in directory *inode*.'''
    encoded = name.encode(self.encoding)
    llfuse.invalidate_entry(inode, encoded)
def remove_tree(self, id_p0, name0):
    '''Remove directory tree

    Iteratively deletes the tree rooted at entry *name0* of directory
    *id_p0*, periodically yielding the GIL.  Raises FUSEError(EPERM) in
    failsafe mode or when the parent inode is locked.
    '''

    if self.failsafe:
        raise FUSEError(errno.EPERM)

    log.debug('started with %d, %s', id_p0, name0)

    if self.inodes[id_p0].locked:
        raise FUSEError(errno.EPERM)

    id0 = self.lookup(id_p0, name0).id
    queue = [ id0 ]  # Directories that we still need to delete
    processed = 0  # Number of steps since last GIL release
    stamp = time.time()  # Time of last GIL release
    gil_step = 250  # Approx. number of steps between GIL releases
    while queue:  # For every directory
        found_subdirs = False  # Does current directory have subdirectories?
        id_p = queue.pop()
        # Only invalidate kernel dentries if the kernel may actually have
        # this directory cached (i.e. it is open).
        if id_p in self.open_inodes:
            inval_entry = lambda x: llfuse.invalidate_entry(id_p, x)
        else:
            inval_entry = lambda x: None

        with self.db.query('SELECT name_id, inode FROM contents WHERE '
                           'parent_inode=?', (id_p,)) as res:
            for (name_id, id_) in res:
                if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
                    if not found_subdirs:
                        # When current directory has subdirectories, we must reinsert
                        # it into queue
                        found_subdirs = True
                        queue.append(id_p)
                    queue.append(id_)
                else:
                    # Leaf entry: invalidate dentry (if needed) and remove it.
                    name = self.db.get_val("SELECT name FROM names WHERE id=?", (name_id,))
                    inval_entry(name)
                    self._remove(id_p, name, id_, force=True)

                processed += 1
                if processed > gil_step:
                    # Also reinsert current directory if we need to yield to other threads
                    if not found_subdirs:
                        queue.append(id_p)
                    break

        if processed > gil_step:
            # Adapt gil_step so that roughly GIL_RELEASE_INTERVAL seconds
            # elapse between yields, then release the GIL.
            dt = time.time() - stamp
            gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250)
            log.debug('Adjusting gil_step to %d and yielding', gil_step)
            processed = 0
            llfuse.lock.yield_(100)
            log.debug('re-acquired lock')
            stamp = time.time()

    if id_p0 in self.open_inodes:
        log.debug('invalidate_entry(%d, %r)', id_p0, name0)
        llfuse.invalidate_entry(id_p0, name0)
    self._remove(id_p0, name0, id0, force=True)

    self.forget([(id0, 1)])
    log.debug('finished')
def remove_tree(self, id_p0, name0):
    '''Remove directory tree

    Iteratively deletes the tree rooted at entry *name0* of directory
    *id_p0*, periodically yielding the GIL.  Raises FUSEError(EPERM) in
    failsafe mode or when the parent inode is locked.
    '''

    if self.failsafe:
        raise FUSEError(errno.EPERM)

    log.debug('started with %d, %s', id_p0, name0)

    if self.inodes[id_p0].locked:
        raise FUSEError(errno.EPERM)

    id0 = self._lookup(id_p0, name0, ctx=None).id
    queue = [id0]  # Directories that we still need to delete
    processed = 0  # Number of steps since last GIL release
    stamp = time.time()  # Time of last GIL release
    gil_step = 250  # Approx. number of steps between GIL releases
    while queue:  # For every directory
        found_subdirs = False  # Does current directory have subdirectories?
        id_p = queue.pop()
        # Only invalidate kernel dentries if the kernel may actually have
        # this directory cached (i.e. it is open).
        if id_p in self.open_inodes:
            inval_entry = lambda x: llfuse.invalidate_entry(id_p, x)
        else:
            inval_entry = lambda x: None

        with self.db.query(
                'SELECT name_id, inode FROM contents WHERE '
                'parent_inode=?', (id_p, )) as res:
            for (name_id, id_) in res:
                if self.db.has_val(
                        'SELECT 1 FROM contents WHERE parent_inode=?', (id_, )):
                    if not found_subdirs:
                        # When current directory has subdirectories, we must reinsert
                        # it into queue
                        found_subdirs = True
                        queue.append(id_p)
                    queue.append(id_)
                else:
                    # Leaf entry: invalidate dentry (if needed) and remove it.
                    name = self.db.get_val(
                        "SELECT name FROM names WHERE id=?", (name_id, ))
                    inval_entry(name)
                    self._remove(id_p, name, id_, force=True)

                processed += 1
                if processed > gil_step:
                    # Also reinsert current directory if we need to yield to other threads
                    if not found_subdirs:
                        queue.append(id_p)
                    break

        if processed > gil_step:
            # Adapt gil_step so that roughly GIL_RELEASE_INTERVAL seconds
            # elapse between yields, then release the GIL.
            dt = time.time() - stamp
            gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250)
            log.debug('Adjusting gil_step to %d and yielding', gil_step)
            processed = 0
            llfuse.lock.yield_(100)
            log.debug('re-acquired lock')
            stamp = time.time()

    if id_p0 in self.open_inodes:
        log.debug('invalidate_entry(%d, %r)', id_p0, name0)
        llfuse.invalidate_entry(id_p0, name0)
    self._remove(id_p0, name0, id0, force=True)

    self.forget([(id0, 1)])
    log.debug('finished')
def invalidate_entry(self, inode, name):
    '''Forward a dentry-invalidation request for *name* in *inode* to llfuse.'''
    llfuse.invalidate_entry(inode, name)
def invalidate_entry(self, entry, name):
    '''Invalidate the kernel's cached dentry for *name* under *entry*.'''
    if not entry.has_ref(False):
        # Kernel holds no reference to this inode (no prior lookup, or it
        # has already been forgotten) -> nothing cached to invalidate.
        return
    llfuse.invalidate_entry(entry.inode, name.encode(self.encoding))