Example #1
    def clear(self):
        '''Delete all entries'''
        oldentries = self._entries
        self._entries = {}
        for n in oldentries:
            if isinstance(n, Directory):
                n.clear()
            llfuse.invalidate_entry(self.inode, str(n))
            self.inodes.del_entry(oldentries[n])
        llfuse.invalidate_inode(self.inode)
        self.invalidate()
Example #2
    def setxattr(self, inode, name, value, ctx):
        if inode != llfuse.ROOT_INODE or name != b'command':
            raise FUSEError(errno.ENOTSUP)

        if value == b'forget_entry':
            llfuse.invalidate_entry(llfuse.ROOT_INODE, self.hello_name)
        elif value == b'forget_inode':
            llfuse.invalidate_inode(self.hello_inode)
        elif value == b'store':
            llfuse.notify_store(self.hello_inode, offset=0,
                                data=self.hello_data)
        else:
            raise FUSEError(errno.EINVAL)
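
The handler above is driven from outside the filesystem by setting the control attribute on the mount point. A minimal, hypothetical trigger using only the standard library is sketched below; the mount point path is an assumption, and on some kernels the attribute name may need a namespace prefix such as 'user.':

    import os

    # Hypothetical trigger for the setxattr() handler above. '/mnt/hello' is
    # an assumed mount point; the value must match one of the commands the
    # handler checks for.
    os.setxattr('/mnt/hello', b'command', b'forget_entry')  # -> llfuse.invalidate_entry()
    os.setxattr('/mnt/hello', b'command', b'forget_inode')  # -> llfuse.invalidate_inode()
    os.setxattr('/mnt/hello', b'command', b'store')         # -> llfuse.notify_store()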
Example #3
    def setxattr(self, inode, name, value, ctx):
        if inode != llfuse.ROOT_INODE or name != b'command':
            raise FUSEError(errno.ENOTSUP)

        if value == b'forget_entry':
            llfuse.invalidate_entry(llfuse.ROOT_INODE, self.hello_name)
        elif value == b'forget_inode':
            llfuse.invalidate_inode(self.hello_inode)
        elif value == b'store':
            llfuse.notify_store(self.hello_inode,
                                offset=0,
                                data=self.hello_data)
        else:
            raise FUSEError(errno.EINVAL)
Example #4
    def invalidate_inode(self, entry):
        if entry.has_ref(False):
            # Only necessary if the kernel has previously done a lookup on this
            # inode and hasn't yet forgotten about it.
            llfuse.invalidate_inode(entry.inode)
Example #5
    def invalidate_inode(self, inode):
        llfuse.invalidate_inode(inode)
Example #6
    def copy_tree(self, src_id, target_id):
        """Efficiently copy directory tree"""

        log.debug("copy_tree(%d, %d): start", src_id, target_id)

        # To avoid lookups and make code tidier
        make_inode = self.inodes.create_inode
        db = self.db

        # First we make sure that all blocks are in the database
        self.cache.commit()
        log.debug("copy_tree(%d, %d): committed cache", src_id, target_id)

        # Copy target attributes
        src_inode = self.inodes[src_id]
        target_inode = self.inodes[target_id]
        for attr in ("atime", "ctime", "mtime", "mode", "uid", "gid"):
            setattr(target_inode, attr, getattr(src_inode, attr))

        # We first replicate into a dummy inode, so that we
        # need to invalidate only once.
        timestamp = time.time()
        tmp = make_inode(mtime=timestamp, ctime=timestamp, atime=timestamp, uid=0, gid=0, mode=0, refcount=0)

        queue = [(src_id, tmp.id, 0)]
        id_cache = dict()
        processed = 0  # Number of steps since last GIL release
        stamp = time.time()  # Time of last GIL release
        gil_step = 100  # Approx. number of steps between GIL releases
        while queue:
            (src_id, target_id, rowid) = queue.pop()
            log.debug(
                "copy_tree(%d, %d): Processing directory (%d, %d, %d)",
                src_inode.id,
                target_inode.id,
                src_id,
                target_id,
                rowid,
            )
            for (name_id, id_, rowid) in db.query(
                "SELECT name_id, inode, rowid FROM contents " "WHERE parent_inode=? AND rowid > ? " "ORDER BY rowid",
                (src_id, rowid),
            ):

                if id_ not in id_cache:
                    inode = self.inodes[id_]

                    try:
                        inode_new = make_inode(
                            refcount=1,
                            mode=inode.mode,
                            size=inode.size,
                            uid=inode.uid,
                            gid=inode.gid,
                            mtime=inode.mtime,
                            atime=inode.atime,
                            ctime=inode.ctime,
                            rdev=inode.rdev,
                        )
                    except OutOfInodesError:
                        log.warn("Could not find a free inode")
                        raise FUSEError(errno.ENOSPC)

                    id_new = inode_new.id

                    if inode.refcount != 1:
                        id_cache[id_] = id_new

                    db.execute(
                        "INSERT INTO symlink_targets (inode, target) "
                        "SELECT ?, target FROM symlink_targets WHERE inode=?",
                        (id_new, id_),
                    )

                    db.execute(
                        "INSERT INTO inode_blocks (inode, blockno, block_id) "
                        "SELECT ?, blockno, block_id FROM inode_blocks "
                        "WHERE inode=?",
                        (id_new, id_),
                    )

                    db.execute(
                        "UPDATE inodes SET block_id=" "(SELECT block_id FROM inodes WHERE id=?) " "WHERE id=?",
                        (id_, id_new),
                    )

                    processed += db.execute(
                        "REPLACE INTO blocks (id, hash, refcount, size, obj_id) "
                        "SELECT id, hash, refcount+1, size, obj_id "
                        "FROM inode_blocks_v JOIN blocks ON block_id = id "
                        "WHERE inode = ?",
                        (id_new,),
                    )

                    if db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_,)):
                        queue.append((id_, id_new, 0))
                else:
                    id_new = id_cache[id_]
                    self.inodes[id_new].refcount += 1

                db.execute(
                    "INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)", (name_id, id_new, target_id)
                )
                db.execute("UPDATE names SET refcount=refcount+1 WHERE id=?", (name_id,))

                processed += 1

                if processed > gil_step:
                    log.debug(
                        "copy_tree(%d, %d): Requeueing (%d, %d, %d) to yield lock",
                        src_inode.id,
                        target_inode.id,
                        src_id,
                        target_id,
                        rowid,
                    )
                    queue.append((src_id, target_id, rowid))
                    break

            if processed > gil_step:
                dt = time.time() - stamp
                gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
                log.debug("copy_tree(%d, %d): Adjusting gil_step to %d", src_inode.id, target_inode.id, gil_step)
                processed = 0
                llfuse.lock.yield_()
                stamp = time.time()

        # Make replication visible
        self.db.execute("UPDATE contents SET parent_inode=? WHERE parent_inode=?", (target_inode.id, tmp.id))
        del self.inodes[tmp.id]
        llfuse.invalidate_inode(target_inode.id)

        log.debug("copy_tree(%d, %d): end", src_inode.id, target_inode.id)
Example #7
    def copy_tree(self, src_id, target_id):
        '''Efficiently copy directory tree'''

        if self.failsafe:
            raise FUSEError(errno.EPERM)

        log.debug('started with %d, %d', src_id, target_id)

        # To avoid lookups and make code tidier
        make_inode = self.inodes.create_inode
        db = self.db

        # First we make sure that all blocks are in the database
        self.cache.commit()
        log.debug('committed cache')

        # Copy target attributes
        # These come from setxattr, so they may have been deleted
        # without being in open_inodes
        try:
            src_inode = self.inodes[src_id]
            target_inode = self.inodes[target_id]
        except KeyError:
            raise FUSEError(errno.ENOENT)
        for attr in ('atime', 'ctime', 'mtime', 'mode', 'uid', 'gid'):
            setattr(target_inode, attr, getattr(src_inode, attr))

        # We first replicate into a dummy inode, so that we
        # need to invalidate only once.
        timestamp = time.time()
        tmp = make_inode(mtime=timestamp, ctime=timestamp, atime=timestamp,
                         uid=0, gid=0, mode=0, refcount=0)

        queue = [ (src_id, tmp.id, 0) ]
        id_cache = dict()
        processed = 0 # Number of steps since last GIL release
        stamp = time.time() # Time of last GIL release
        gil_step = 250 # Approx. number of steps between GIL releases
        while queue:
            (src_id, target_id, off) = queue.pop()
            log.debug('Processing directory (%d, %d, %d)', src_id, target_id, off)
            with db.query('SELECT name_id, inode FROM contents WHERE parent_inode=? '
                          'AND name_id > ? ORDER BY name_id', (src_id, off)) as res:
                for (name_id, id_) in res:

                    if id_ not in id_cache:
                        inode = self.inodes[id_]

                        try:
                            inode_new = make_inode(refcount=1, mode=inode.mode, size=inode.size,
                                                   uid=inode.uid, gid=inode.gid,
                                                   mtime=inode.mtime, atime=inode.atime,
                                                   ctime=inode.ctime, rdev=inode.rdev)
                        except OutOfInodesError:
                            log.warning('Could not find a free inode')
                            raise FUSEError(errno.ENOSPC)

                        id_new = inode_new.id

                        if inode.refcount != 1:
                            id_cache[id_] = id_new

                        db.execute('INSERT INTO symlink_targets (inode, target) '
                                   'SELECT ?, target FROM symlink_targets WHERE inode=?',
                                   (id_new, id_))

                        db.execute('INSERT INTO ext_attributes (inode, name_id, value) '
                                   'SELECT ?, name_id, value FROM ext_attributes WHERE inode=?',
                                   (id_new, id_))
                        db.execute('UPDATE names SET refcount = refcount + 1 WHERE '
                                   'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
                                   (id_,))

                        processed += db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) '
                                                'SELECT ?, blockno, block_id FROM inode_blocks '
                                                'WHERE inode=?', (id_new, id_))
                        db.execute('REPLACE INTO blocks (id, hash, refcount, size, obj_id) '
                                   'SELECT id, hash, refcount+COUNT(id), size, obj_id '
                                   'FROM inode_blocks JOIN blocks ON block_id = id '
                                   'WHERE inode = ? GROUP BY id', (id_new,))

                        if db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
                            queue.append((id_, id_new, 0))
                    else:
                        id_new = id_cache[id_]
                        self.inodes[id_new].refcount += 1

                    db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                               (name_id, id_new, target_id))
                    db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))

                    processed += 1

                    if processed > gil_step:
                        log.debug('Requeueing (%d, %d, %d) to yield lock',
                                  src_id, target_id, name_id)
                        queue.append((src_id, target_id, name_id))
                        break

            if processed > gil_step:
                dt = time.time() - stamp
                gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250)
                log.debug('Adjusting gil_step to %d and yielding', gil_step)
                processed = 0
                llfuse.lock.yield_(100)
                log.debug('re-acquired lock')
                stamp = time.time()

        # Make replication visible
        self.db.execute('UPDATE contents SET parent_inode=? WHERE parent_inode=?',
                        (target_inode.id, tmp.id))
        del self.inodes[tmp.id]
        llfuse.invalidate_inode(target_inode.id)

        log.debug('finished')
Example #8
    def copy_tree(self, src_id, target_id):
        '''Efficiently copy directory tree'''

        if self.failsafe:
            raise FUSEError(errno.EPERM)

        log.debug('started with %d, %d', src_id, target_id)

        # To avoid lookups and make code tidier
        make_inode = self.inodes.create_inode
        db = self.db

        # First we make sure that all blocks are in the database
        self.cache.commit()
        log.debug('committed cache')

        # Copy target attributes
        # These come from setxattr, so they may have been deleted
        # without being in open_inodes
        try:
            src_inode = self.inodes[src_id]
            target_inode = self.inodes[target_id]
        except KeyError:
            raise FUSEError(errno.ENOENT)
        for attr in ('atime_ns', 'ctime_ns', 'mtime_ns', 'mode', 'uid', 'gid'):
            setattr(target_inode, attr, getattr(src_inode, attr))

        # We first replicate into a dummy inode, so that we
        # need to invalidate only once.
        now_ns = time_ns()
        tmp = make_inode(mtime_ns=now_ns,
                         ctime_ns=now_ns,
                         atime_ns=now_ns,
                         uid=0,
                         gid=0,
                         mode=0,
                         refcount=0)

        queue = [(src_id, tmp.id, 0)]
        id_cache = dict()
        processed = 0  # Number of steps since last GIL release
        stamp = time.time()  # Time of last GIL release
        gil_step = 250  # Approx. number of steps between GIL releases
        while queue:
            (src_id, target_id, off) = queue.pop()
            log.debug('Processing directory (%d, %d, %d)', src_id, target_id,
                      off)
            with db.query(
                    'SELECT name_id, inode FROM contents WHERE parent_inode=? '
                    'AND name_id > ? ORDER BY name_id', (src_id, off)) as res:
                for (name_id, id_) in res:

                    if id_ not in id_cache:
                        inode = self.inodes[id_]

                        try:
                            inode_new = make_inode(refcount=1,
                                                   mode=inode.mode,
                                                   size=inode.size,
                                                   uid=inode.uid,
                                                   gid=inode.gid,
                                                   mtime_ns=inode.mtime_ns,
                                                   atime_ns=inode.atime_ns,
                                                   ctime_ns=inode.ctime_ns,
                                                   rdev=inode.rdev)
                        except OutOfInodesError:
                            log.warning('Could not find a free inode')
                            raise FUSEError(errno.ENOSPC)

                        id_new = inode_new.id

                        if inode.refcount != 1:
                            id_cache[id_] = id_new

                        db.execute(
                            'INSERT INTO symlink_targets (inode, target) '
                            'SELECT ?, target FROM symlink_targets WHERE inode=?',
                            (id_new, id_))

                        db.execute(
                            'INSERT INTO ext_attributes (inode, name_id, value) '
                            'SELECT ?, name_id, value FROM ext_attributes WHERE inode=?',
                            (id_new, id_))
                        db.execute(
                            'UPDATE names SET refcount = refcount + 1 WHERE '
                            'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
                            (id_, ))

                        processed += db.execute(
                            'INSERT INTO inode_blocks (inode, blockno, block_id) '
                            'SELECT ?, blockno, block_id FROM inode_blocks '
                            'WHERE inode=?', (id_new, id_))
                        db.execute(
                            'REPLACE INTO blocks (id, hash, refcount, size, obj_id) '
                            'SELECT id, hash, refcount+COUNT(id), size, obj_id '
                            'FROM inode_blocks JOIN blocks ON block_id = id '
                            'WHERE inode = ? GROUP BY id', (id_new, ))

                        if db.has_val(
                                'SELECT 1 FROM contents WHERE parent_inode=?',
                            (id_, )):
                            queue.append((id_, id_new, 0))
                    else:
                        id_new = id_cache[id_]
                        self.inodes[id_new].refcount += 1

                    db.execute(
                        'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                        (name_id, id_new, target_id))
                    db.execute(
                        'UPDATE names SET refcount=refcount+1 WHERE id=?',
                        (name_id, ))

                    processed += 1

                    if processed > gil_step:
                        log.debug('Requeueing (%d, %d, %d) to yield lock',
                                  src_id, target_id, name_id)
                        queue.append((src_id, target_id, name_id))
                        break

            if processed > gil_step:
                dt = time.time() - stamp
                gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250)
                log.debug('Adjusting gil_step to %d and yielding', gil_step)
                processed = 0
                llfuse.lock.yield_(100)
                log.debug('re-acquired lock')
                stamp = time.time()

        # Make replication visible
        self.db.execute(
            'UPDATE contents SET parent_inode=? WHERE parent_inode=?',
            (target_inode.id, tmp.id))
        del self.inodes[tmp.id]
        llfuse.invalidate_inode(target_inode.id)

        log.debug('finished')
Example #9
    def del_entry(self, entry):
        llfuse.invalidate_inode(entry.inode)
        del self._entries[entry.inode]
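
Taken together, the shorter examples follow the same two-step pattern: update the filesystem's own bookkeeping first, then tell the kernel to drop its cached view of the affected objects. A minimal, hypothetical helper combining both invalidation calls (the function name and arguments are illustrative, not part of llfuse):

    import llfuse

    def drop_kernel_cache(parent_inode, name, inode):
        # Hypothetical helper, not part of llfuse itself: make the kernel
        # forget the directory entry (parent_inode, name) and any cached
        # attributes or data it holds for inode.
        llfuse.invalidate_entry(parent_inode, name)  # name is bytes, as in the examples above
        llfuse.invalidate_inode(inode)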