class fs_api_tests(TestCase): def setUp(self): self.bucket_dir = tempfile.mkdtemp() self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None)) self.bucket = self.bucket_pool.pop_conn() self.cachedir = tempfile.mkdtemp() + "/" self.blocksize = 1024 self.dbfile = tempfile.NamedTemporaryFile() self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) # Tested methods assume that they are called from # file system request handler llfuse.lock.acquire() self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 5) self.server = fs.Operations(self.block_cache, self.db, self.blocksize) self.server.init() # Keep track of unused filenames self.name_cnt = 0 def tearDown(self): self.server.destroy() self.block_cache.destroy() if os.path.exists(self.cachedir): shutil.rmtree(self.cachedir) shutil.rmtree(self.bucket_dir) llfuse.lock.release() @staticmethod def random_data(len_): with open("/dev/urandom", "rb") as fd: return fd.read(len_) def fsck(self): self.block_cache.clear() self.server.inodes.flush() fsck = Fsck(self.cachedir, self.bucket, { 'blocksize': self.blocksize }, self.db) fsck.check() self.assertFalse(fsck.found_errors) def newname(self): self.name_cnt += 1 return "s3ql_%d" % self.name_cnt def test_getattr_root(self): self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode)) self.fsck() def test_create(self): ctx = Ctx() mode = self.dir_mode() name = self.newname() inode_p_old = self.server.getattr(ROOT_INODE).copy() time.sleep(CLOCK_GRANULARITY) self.server._create(ROOT_INODE, name, mode, ctx) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE)) inode = self.server.getattr(id_) self.assertEqual(inode.mode, mode) self.assertEqual(inode.uid, ctx.uid) self.assertEqual(inode.gid, ctx.gid) self.assertEqual(inode.refcount, 1) self.assertEqual(inode.size, 0) inode_p_new = self.server.getattr(ROOT_INODE) self.assertGreater(inode_p_new.mtime, inode_p_old.mtime) self.assertGreater(inode_p_new.ctime, inode_p_old.ctime) self.fsck() def test_extstat(self): # Test with zero contents self.server.extstat() # Test with empty file (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) self.server.extstat() # Test with data in file fh = self.server.open(inode.id, os.O_RDWR) self.server.write(fh, 0, 'foobar') self.server.release(fh) self.server.extstat() self.fsck() @staticmethod def dir_mode(): return (randint(0, 07777) & ~stat.S_IFDIR) | stat.S_IFDIR @staticmethod def file_mode(): return (randint(0, 07777) & ~stat.S_IFREG) | stat.S_IFREG def test_getxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) self.assertRaises(FUSEError, self.server.getxattr, inode.id, 'nonexistant-attr') self.server.setxattr(inode.id, 'my-attr', 'strabumm!') self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!') self.fsck() def test_link(self): name = self.newname() inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) inode_p_new_before = self.server.getattr(inode_p_new.id).copy() (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) time.sleep(CLOCK_GRANULARITY) inode_before = self.server.getattr(inode.id).copy() self.server.link(inode.id, inode_p_new.id, name) inode_after = self.server.lookup(inode_p_new.id, name) inode_p_new_after = self.server.getattr(inode_p_new.id) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, inode_p_new.id)) self.assertEqual(inode_before.id, id_) self.assertEqual(inode_after.refcount, 2) self.assertGreater(inode_after.ctime, inode_before.ctime) self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime) self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime) self.fsck() def test_listxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) self.assertListEqual([], self.server.listxattr(inode.id)) self.server.setxattr(inode.id, 'key1', 'blub') self.assertListEqual(['key1'], self.server.listxattr(inode.id)) self.server.setxattr(inode.id, 'key2', 'blub') self.assertListEqual(sorted(['key1', 'key2']), sorted(self.server.listxattr(inode.id))) self.fsck() def test_read(self): len_ = self.blocksize data = self.random_data(len_) off = self.blocksize // 2 (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.write(fh, off, data) inode_before = self.server.getattr(inode.id).copy() time.sleep(CLOCK_GRANULARITY) self.assertTrue(self.server.read(fh, off, len_) == data) inode_after = self.server.getattr(inode.id) self.assertGreater(inode_after.atime, inode_before.atime) self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off]) self.assertTrue(self.server.read(fh, self.blocksize, len_) == data[off:]) self.server.release(fh) self.fsck() def test_readdir(self): # Create a few entries names = [ 'entry_%2d' % i for i in range(20) ] for name in names: (fh, _) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx()) self.server.release(fh) # Delete some to make sure that we don't have continous rowids remove_no = [0, 2, 3, 5, 9] for i in remove_no: self.server.unlink(ROOT_INODE, names[i]) del names[i] # Read all fh = self.server.opendir(ROOT_INODE) self.assertListEqual(sorted(names + ['lost+found']) , sorted(x[0] for x in self.server.readdir(fh, 0))) self.server.releasedir(fh) # Read in parts fh 
= self.server.opendir(ROOT_INODE) entries = list() try: next_ = 0 while True: gen = self.server.readdir(fh, next_) for _ in range(3): (name, _, next_) = next(gen) entries.append(name) except StopIteration: pass self.assertListEqual(sorted(names + ['lost+found']) , sorted(entries)) self.server.releasedir(fh) self.fsck() def test_release(self): name = self.newname() # Test that entries are deleted when they're no longer referenced (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx()) self.server.write(fh, 0, 'foobar') self.server.unlink(ROOT_INODE, name) self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))) self.assertTrue(self.server.getattr(inode.id).id) self.server.release(fh) self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,))) self.fsck() def test_removexattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) self.assertRaises(FUSEError, self.server.removexattr, inode.id, 'some name') self.server.setxattr(inode.id, 'key1', 'blub') self.server.removexattr(inode.id, 'key1') self.assertListEqual([], self.server.listxattr(inode.id)) self.fsck() def test_rename(self): oldname = self.newname() newname = self.newname() inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx()) inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) inode_p_new_before = self.server.getattr(inode_p_new.id).copy() inode_p_old_before = self.server.getattr(ROOT_INODE).copy() time.sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname) inode_p_old_after = self.server.getattr(ROOT_INODE) inode_p_new_after = self.server.getattr(inode_p_new.id) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id == name_id ' 'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id)) self.assertEqual(inode.id, id_) self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime) self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime) self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime) self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime) self.fsck() def test_replace_file(self): oldname = self.newname() newname = self.newname() (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(), Ctx()) self.server.write(fh, 0, 'some data to deal with') self.server.release(fh) self.server.setxattr(inode.id, 'test_xattr', '42*8') inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) inode_p_new_before = self.server.getattr(inode_p_new.id).copy() inode_p_old_before = self.server.getattr(ROOT_INODE).copy() (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(), Ctx()) self.server.write(fh, 0, 'even more data to deal with') self.server.release(fh) self.server.setxattr(inode2.id, 'test_xattr', '42*8') time.sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname) inode_p_old_after = self.server.getattr(ROOT_INODE) inode_p_new_after = self.server.getattr(inode_p_new.id) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (newname, inode_p_new.id)) self.assertEqual(inode.id, id_) self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime) self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime) self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime) self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,))) self.fsck() def test_replace_dir(self): oldname = self.newname() newname = self.newname() inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx()) inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) inode_p_new_before = self.server.getattr(inode_p_new.id).copy() inode_p_old_before = self.server.getattr(ROOT_INODE).copy() inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx()) time.sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname) inode_p_old_after = self.server.getattr(ROOT_INODE) inode_p_new_after = self.server.getattr(inode_p_new.id) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (newname, inode_p_new.id)) self.assertEqual(inode.id, id_) self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime) self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime) self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime) self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,))) self.fsck() def test_setattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0641, Ctx()) self.server.release(fh) inode_old = self.server.getattr(inode.id).copy() attr = llfuse.EntryAttributes() attr.st_mode = self.file_mode() attr.st_uid = randint(0, 2 ** 32) attr.st_gid = randint(0, 2 ** 32) attr.st_rdev = randint(0, 2 ** 32) attr.st_atime = time.timezone + randint(0, 2 ** 32) / 10 ** 6 attr.st_mtime = time.timezone + randint(0, 2 ** 32) / 10 ** 6 time.sleep(CLOCK_GRANULARITY) self.server.setattr(inode.id, attr) inode_new = self.server.getattr(inode.id) self.assertGreater(inode_new.ctime, inode_old.ctime) for key in attr.__slots__: if getattr(attr, key) is not None: self.assertEquals(getattr(attr, key), getattr(inode_new, key)) def test_truncate(self): len_ = int(2.7 * self.blocksize) data = self.random_data(len_) attr = llfuse.EntryAttributes() (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.write(fh, 0, data) attr.st_size = len_ // 2 self.server.setattr(inode.id, attr) self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2]) attr.st_size = len_ self.server.setattr(inode.id, attr) self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2] + b'\0' * (len_ // 2)) self.server.release(fh) self.fsck() def test_truncate_0(self): len1 = 158 len2 = 133 attr = llfuse.EntryAttributes() (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.write(fh, 0, self.random_data(len1)) self.server.release(fh) 
self.server.inodes.flush() fh = self.server.open(inode.id, os.O_RDWR) attr.st_size = 0 self.server.setattr(inode.id, attr) self.server.write(fh, 0, self.random_data(len2)) self.server.release(fh) self.fsck() def test_setxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) self.server.setxattr(inode.id, 'my-attr', 'strabumm!') self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!') self.fsck() def test_statfs(self): # Test with zero contents self.server.statfs() # Test with empty file (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.release(fh) self.server.statfs() # Test with data in file fh = self.server.open(inode.id, None) self.server.write(fh, 0, 'foobar') self.server.release(fh) self.server.statfs() def test_symlink(self): target = self.newname() name = self.newname() inode_p_before = self.server.getattr(ROOT_INODE).copy() time.sleep(CLOCK_GRANULARITY) inode = self.server.symlink(ROOT_INODE, name, target, Ctx()) inode_p_after = self.server.getattr(ROOT_INODE) self.assertEqual(target, self.server.readlink(inode.id)) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE)) self.assertEqual(inode.id, id_) self.assertLess(inode_p_before.mtime, inode_p_after.mtime) self.assertLess(inode_p_before.ctime, inode_p_after.ctime) def test_unlink(self): name = self.newname() (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx()) self.server.write(fh, 0, 'some data to deal with') self.server.release(fh) # Add extended attributes self.server.setxattr(inode.id, 'test_xattr', '42*8') inode_p_before = self.server.getattr(ROOT_INODE).copy() time.sleep(CLOCK_GRANULARITY) self.server.unlink(ROOT_INODE, name) inode_p_after = self.server.getattr(ROOT_INODE) self.assertLess(inode_p_before.mtime, inode_p_after.mtime) self.assertLess(inode_p_before.ctime, inode_p_after.ctime) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,))) self.fsck() def test_rmdir(self): name = self.newname() inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx()) inode_p_before = self.server.getattr(ROOT_INODE).copy() time.sleep(CLOCK_GRANULARITY) self.server.rmdir(ROOT_INODE, name) inode_p_after = self.server.getattr(ROOT_INODE) self.assertLess(inode_p_before.mtime, inode_p_after.mtime) self.assertLess(inode_p_before.ctime, inode_p_after.ctime) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE))) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,))) self.fsck() def test_relink(self): name = self.newname() name2 = self.newname() data = 'some data to deal with' (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx()) self.server.write(fh, 0, data) self.server.unlink(ROOT_INODE, name) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))) self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,))) self.server.link(inode.id, ROOT_INODE, name2) self.server.release(fh) fh = self.server.open(inode.id, os.O_RDONLY) self.assertTrue(self.server.read(fh, 0, len(data)) == data) self.server.release(fh) self.fsck() def test_write(self): len_ = self.blocksize data = self.random_data(len_) off = self.blocksize // 2 (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) inode_before = self.server.getattr(inode.id).copy() time.sleep(CLOCK_GRANULARITY) self.server.write(fh, off, data) inode_after = self.server.getattr(inode.id) self.assertGreater(inode_after.mtime, inode_before.mtime) self.assertGreater(inode_after.ctime, inode_before.ctime) self.assertEqual(inode_after.size, off + len_) self.server.write(fh, 0, data) inode_after = self.server.getattr(inode.id) self.assertEqual(inode_after.size, off + len_) self.server.release(fh) self.fsck() def test_edit(self): len_ = self.blocksize data = self.random_data(len_) (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx()) self.server.write(fh, 0, data) self.server.release(fh) self.block_cache.clear() fh = self.server.open(inode.id, os.O_RDWR) attr = llfuse.EntryAttributes() attr.st_size = 0 self.server.setattr(inode.id, attr) self.server.write(fh, 0, data[50:]) self.server.release(fh) self.fsck() def test_copy_tree(self): src_inode = 
self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx()) dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx()) # Create file (fh, f1_inode) = self.server.create(src_inode.id, 'file1', self.file_mode(), Ctx()) self.server.write(fh, 0, 'file1 contents') self.server.release(fh) # Create hardlink (fh, f2_inode) = self.server.create(src_inode.id, 'file2', self.file_mode(), Ctx()) self.server.write(fh, 0, 'file2 contents') self.server.release(fh) f2_inode = self.server.link(f2_inode.id, src_inode.id, 'file2_hardlink') # Create subdirectory d1_inode = self.server.mkdir(src_inode.id, 'dir1', self.dir_mode(), Ctx()) d2_inode = self.server.mkdir(d1_inode.id, 'dir2', self.dir_mode(), Ctx()) # ..with a 3rd hardlink f2_inode = self.server.link(f2_inode.id, d1_inode.id, 'file2_hardlink') # Replicate self.server.copy_tree(src_inode.id, dst_inode.id) # Change files fh = self.server.open(f1_inode.id, os.O_RDWR) self.server.write(fh, 0, 'new file1 contents') self.server.release(fh) fh = self.server.open(f2_inode.id, os.O_RDWR) self.server.write(fh, 0, 'new file2 contents') self.server.release(fh) # Get copy properties f1_inode_c = self.server.lookup(dst_inode.id, 'file1') f2_inode_c = self.server.lookup(dst_inode.id, 'file2') f2h_inode_c = self.server.lookup(dst_inode.id, 'file2_hardlink') d1_inode_c = self.server.lookup(dst_inode.id, 'dir1') d2_inode_c = self.server.lookup(d1_inode_c.id, 'dir2') f2_h_inode_c = self.server.lookup(d1_inode_c.id, 'file2_hardlink') # Check file1 fh = self.server.open(f1_inode_c.id, os.O_RDWR) self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents') self.server.release(fh) self.assertNotEqual(f1_inode.id, f1_inode_c.id) # Check file2 fh = self.server.open(f2_inode_c.id, os.O_RDWR) self.assertTrue(self.server.read(fh, 0, 42) == 'file2 contents') self.server.release(fh) self.assertEqual(f2_inode_c.id, f2h_inode_c.id) self.assertEqual(f2_inode_c.refcount, 3) self.assertNotEqual(f2_inode.id, f2_inode_c.id) 
self.assertEqual(f2_h_inode_c.id, f2_inode_c.id) # Check subdir1 self.assertNotEqual(d1_inode.id, d1_inode_c.id) self.assertNotEqual(d2_inode.id, d2_inode_c.id) self.fsck() def test_lock_tree(self): inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx()) # Create file (fh, inode1a) = self.server.create(inode1.id, 'file1', self.file_mode(), Ctx()) self.server.write(fh, 0, 'file1 contents') self.server.release(fh) # Create subdirectory inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx()) (fh, inode2a) = self.server.create(inode2.id, 'file2', self.file_mode(), Ctx()) self.server.write(fh, 0, 'file2 contents') self.server.release(fh) # Another file (fh, inode3) = self.server.create(ROOT_INODE, 'file1', self.file_mode(), Ctx()) self.server.release(fh) # Lock self.server.lock_tree(inode1.id) for i in (inode1.id, inode1a.id, inode2.id, inode2a.id): self.assertTrue(self.server.inodes[i].locked) # Remove with self.assertRaises(FUSEError) as cm: self.server._remove(inode1.id, 'file1', inode1a.id) self.assertEqual(cm.exception.errno, errno.EPERM) # Rename / Replace with self.assertRaises(FUSEError) as cm: self.server.rename(ROOT_INODE, 'file1', inode1.id, 'file2') self.assertEqual(cm.exception.errno, errno.EPERM) with self.assertRaises(FUSEError) as cm: self.server.rename(inode1.id, 'file1', ROOT_INODE, 'file2') self.assertEqual(cm.exception.errno, errno.EPERM) # Open with self.assertRaises(FUSEError) as cm: self.server.open(inode2a.id, os.O_RDWR) self.assertEqual(cm.exception.errno, errno.EPERM) with self.assertRaises(FUSEError) as cm: self.server.open(inode2a.id, os.O_WRONLY) self.assertEqual(cm.exception.errno, errno.EPERM) self.server.release(self.server.open(inode3.id, os.O_WRONLY)) # Write fh = self.server.open(inode2a.id, os.O_RDONLY) with self.assertRaises(FUSEError) as cm: self.server.write(fh, 0, 'foo') self.assertEqual(cm.exception.errno, errno.EPERM) self.server.release(fh) # Create with self.assertRaises(FUSEError) as cm: 
self.server._create(inode2.id, 'dir1', self.dir_mode(), Ctx()) self.assertEqual(cm.exception.errno, errno.EPERM) # Setattr with self.assertRaises(FUSEError) as cm: self.server.setattr(inode2a.id, dict()) self.assertEqual(cm.exception.errno, errno.EPERM) # xattr with self.assertRaises(FUSEError) as cm: self.server.setxattr(inode2.id, 'name', 'value') self.assertEqual(cm.exception.errno, errno.EPERM) with self.assertRaises(FUSEError) as cm: self.server.removexattr(inode2.id, 'name') self.assertEqual(cm.exception.errno, errno.EPERM) self.fsck() def test_remove_tree(self): inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx()) # Create file (fh, inode1a) = self.server.create(inode1.id, 'file1', self.file_mode(), Ctx()) self.server.write(fh, 0, 'file1 contents') self.server.release(fh) # Create subdirectory inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx()) (fh, inode2a) = self.server.create(inode2.id, 'file2', self.file_mode(), Ctx()) self.server.write(fh, 0, 'file2 contents') self.server.release(fh) # Remove self.server.remove_tree(ROOT_INODE, 'source') for (id_p, name) in ((ROOT_INODE, 'source'), (inode1.id, 'file1'), (inode1.id, 'dir1'), (inode2.id, 'file2')): self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, id_p))) for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id): self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,))) self.fsck()
class cache_tests(TestCase):
    """Exercise BlockCache against a local bucket.

    Several tests swap self.cache.bucket_pool for a TestBucketPool wrapper
    that counts the expected number of write/delete operations; verify()
    asserts that exactly those operations took place.
    """

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode,
                         stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(),
                         time.time(), time.time(), time.time(), 1, 32))

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        # Restore the real pool so destroy() is not throttled by a
        # TestBucketPool left over from a test
        self.cache.bucket_pool = self.bucket_pool
        self.cache.destroy()
        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        # Use the kernel RNG so test payloads are incompressible
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.blocksize))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            # Sleep so access times are strictly ordered
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # Flush the 2 most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])

        # We want to expire 4 entries, 2 of which are already flushed
        self.cache.max_entries = 16
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=2)
        self.cache.expire()
        self.cache.bucket_pool.verify()
        self.assertEqual(len(self.cache.entries), 16)

        # The least recently used entries must have been evicted
        for i in range(20):
            if i in most_recent:
                self.assertTrue((inode, i) not in self.cache.entries)
            else:
                self.assertTrue((inode, i) in self.cache.entries)

    def test_upload(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.max_size)
        blockno1 = 21
        blockno2 = 25
        blockno3 = 7

        data1 = self.random_data(datalen)
        data2 = self.random_data(datalen)
        data3 = self.random_data(datalen)

        # Case 1: create new object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
            el1 = fh
        self.cache.upload(el1)
        self.cache.bucket_pool.verify()

        # Case 2: Link new object (dedup: same data, no new write expected)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data1)
            el2 = fh
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 3: Upload old object, still has references
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el1)
        self.cache.bucket_pool.verify()

        # Case 4: Upload old object, no references left
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1, no_write=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data3)
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 5: Link old object, no references left
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 6: Link old object, still has references
        # (Need to create another object first)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno3) as fh:
            fh.seek(0)
            fh.write(data1)
            el3 = fh
        self.cache.upload(el3)
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(el1)
        self.cache.clear()
        self.cache.bucket_pool.verify()

    def test_remove_referenced(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.max_size)
        blockno1 = 21
        blockno2 = 24
        data = self.random_data(datalen)

        # Two blocks with identical data share one object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data)
        self.cache.clear()
        self.cache.bucket_pool.verify()

        # Removing one block must not delete the still-referenced object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        self.cache.remove(inode, blockno1)
        self.cache.bucket_pool.verify()

    def test_remove_cache(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 1: Elements only in cache
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.remove(inode, 1)
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertTrue(fh.read(42) == "")

    def test_remove_cache_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 2: Element in cache and db
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        commit(self.cache, inode)
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.bucket_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertTrue(fh.read(42) == "")

    def test_remove_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 3: Element only in DB
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        self.cache.clear()
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.bucket_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertTrue(fh.read(42) == "")
class fs_api_tests(unittest.TestCase):
    """Exercise the fs.Operations request-handler API directly (no FUSE mount).

    Each test drives the handler methods (create, write, rename, ...) against
    an in-process metadata DB and a local backend, then runs fsck to verify
    that the resulting file system is consistent.
    """

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: BetterBackend(b'schwubl', 'lzma',
                                                              plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        self.block_cache = BlockCache(self.backend_pool, self.db,
                                      self.cachedir + "/cache",
                                      self.max_obj_size * 5)
        self.server = fs.Operations(self.block_cache, self.db,
                                    self.max_obj_size, InodeCache(self.db, 0))
        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0

    def tearDown(self):
        self.server.inodes.flush()
        self.block_cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)
        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        # Random bytes for file contents; /dev/urandom so tests don't block.
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def fsck(self):
        """Flush caches and assert that fsck finds no inconsistencies."""
        self.block_cache.clear()
        self.server.inodes.flush()
        fsck = Fsck(self.cachedir + '/cache', self.backend,
                    { 'max_obj_size': self.max_obj_size }, self.db)
        fsck.check()
        self.assertFalse(fsck.found_errors)

    def newname(self):
        # Return a filename that has not been used in this test yet.
        self.name_cnt += 1
        return "s3ql_%d" % self.name_cnt

    def test_getattr_root(self):
        self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
        self.fsck()

    def test_create(self):
        ctx = Ctx()
        mode = self.dir_mode()
        name = self.newname()

        inode_p_old = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server._create(ROOT_INODE, name, mode, ctx)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        inode = self.server.getattr(id_)

        self.assertEqual(inode.mode, mode)
        self.assertEqual(inode.uid, ctx.uid)
        self.assertEqual(inode.gid, ctx.gid)
        self.assertEqual(inode.refcount, 1)
        self.assertEqual(inode.size, 0)

        # Creating an entry must bump the parent's mtime and ctime
        inode_p_new = self.server.getattr(ROOT_INODE)
        self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
        self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)

        self.server.forget([(id_, 1)])
        self.fsck()

    def test_extstat(self):
        # Test with zero contents
        self.server.extstat()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.extstat()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'foobar')
        self.server.release(fh)

        self.server.extstat()
        self.server.forget([(inode.id, 1)])
        self.fsck()

    @staticmethod
    def dir_mode():
        # Random permission bits combined with the directory file type
        return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR

    @staticmethod
    def file_mode():
        # Random permission bits combined with the regular-file type
        return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG

    def test_getxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.getxattr, inode.id,
                          'nonexistant-attr')

        self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_link(self):
        name = self.newname()

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        time.sleep(CLOCK_GRANULARITY)

        inode_before = self.server.getattr(inode.id).copy()
        self.server.link(inode.id, inode_p_new.id, name)

        inode_after = self.server.lookup(inode_p_new.id, name)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id))

        self.assertEqual(inode_before.id, id_)
        self.assertEqual(inode_after.refcount, 2)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1), (inode_after.id, 1)])
        self.fsck()

    def test_listxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertListEqual([], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, 'key1', 'blub')
        self.assertListEqual(['key1'], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, 'key2', 'blub')
        self.assertListEqual(sorted(['key1', 'key2']),
                             sorted(self.server.listxattr(inode.id)))

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_read(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())

        self.server.write(fh, off, data)
        inode_before = self.server.getattr(inode.id).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.assertTrue(self.server.read(fh, off, len_) == data)
        # Reading must update atime
        inode_after = self.server.getattr(inode.id)
        self.assertGreater(inode_after.atime, inode_before.atime)
        # The hole before the written range must read back as zeros
        self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
        self.assertTrue(self.server.read(fh, self.max_obj_size, len_) == data[off:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_readdir(self):
        # Create a few entries
        names = [ 'entry_%2d' % i for i in range(20) ]
        for name in names:
            (fh, inode) = self.server.create(ROOT_INODE, name,
                                             self.file_mode(), os.O_RDWR, Ctx())
            self.server.release(fh)
            self.server.forget([(inode.id, 1)])

        # Delete some to make sure that we don't have continous rowids
        remove_no = [0, 2, 3, 5, 9]
        for i in remove_no:
            self.server.unlink(ROOT_INODE, names[i])
            del names[i]

        # Read all
        fh = self.server.opendir(ROOT_INODE)
        self.assertListEqual(sorted(names + ['lost+found']),
                             sorted(x[0] for x in self.server.readdir(fh, 0)))
        self.server.releasedir(fh)

        # Read in parts of three entries, resuming from the returned offset
        fh = self.server.opendir(ROOT_INODE)
        entries = list()
        try:
            next_ = 0
            while True:
                gen = self.server.readdir(fh, next_)
                for _ in range(3):
                    (name, _, next_) = next(gen)
                    entries.append(name)
        except StopIteration:
            pass

        self.assertListEqual(sorted(names + ['lost+found']),
                             sorted(entries))
        self.server.releasedir(fh)

        self.fsck()

    def test_forget(self):
        name = self.newname()

        # Test that entries are deleted when they're no longer referenced
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'foobar')
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        # Inode must survive while the kernel still holds a reference
        self.assertTrue(self.server.getattr(inode.id).id)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_removexattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.removexattr, inode.id, 'some name')
        self.server.setxattr(inode.id, 'key1', 'blub')
        self.server.removexattr(inode.id, 'key1')
        self.assertListEqual([], self.server.listxattr(inode.id))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_rename(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)

        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id == name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        # Both parent directories must have updated time stamps
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_file(self):
        oldname = self.newname()
        newname = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, 'test_xattr', '42*8')

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        # Target file that the rename will overwrite
        (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(),
                                          os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, 'test_xattr', '42*8')
        self.server.forget([(inode2.id, 1)])

        time.sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        # The overwritten inode must be gone
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_dir(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        # Empty target directory that the rename will overwrite
        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())
        self.server.forget([(inode2.id, 1)])

        time.sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
        self.fsck()

    def test_setattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0o641,
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()

        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2 ** 32)
        attr.st_gid = randint(0, 2 ** 32)
        attr.st_atime = randint(0, 2 ** 32) / 10 ** 6
        attr.st_mtime = randint(0, 2 ** 32) / 10 ** 6

        time.sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)

        # Every attribute we set must be reflected on the inode
        for key in attr.__slots__:
            if getattr(attr, key) is not None:
                self.assertEquals(getattr(attr, key), getattr(inode_new, key))

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate(self):
        len_ = int(2.7 * self.max_obj_size)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)

        # Shrink, then grow again; the regrown part must be zero-filled
        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        attr.st_size = len_
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_)
                        == data[:len_ // 2] + b'\0' * (len_ // 2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate_0(self):
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        self.server.inodes.flush()

        # Truncate to zero and overwrite with fresh data
        fh = self.server.open(inode.id, os.O_RDWR)
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_setxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_names(self):
        name1 = self.newname()
        name2 = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name1,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        (fh, inode) = self.server.create(ROOT_INODE, name2,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        # Re-use a filename as an xattr name to exercise name refcounting
        self.server.setxattr(inode.id, name1, 'strabumm!')
        self.fsck()

        self.server.removexattr(inode.id, name1)
        self.fsck()

        self.server.setxattr(inode.id, name1, 'strabumm karacho!!')
        self.server.unlink(ROOT_INODE, name1)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_statfs(self):
        # Test with zero contents
        self.server.statfs()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.statfs()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'foobar')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.server.statfs()

    def test_symlink(self):
        target = self.newname()
        name = self.newname()

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertEqual(target, self.server.readlink(inode.id))

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_unlink(self):
        name = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'some data to deal with')
        self.server.release(fh)

        # Add extended attributes
        self.server.setxattr(inode.id, 'test_xattr', '42*8')
        self.server.forget([(inode.id, 1)])

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_rmdir(self):
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
        self.server.forget([(inode.id, 1)])

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_relink(self):
        name = self.newname()
        name2 = self.newname()
        data = 'some data to deal with'

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        self.server.unlink(ROOT_INODE, name)
        self.server.inodes.flush()
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        # Unlinked but still referenced inode must still exist ...
        self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
        # ... so it can be linked back under a new name
        self.server.link(inode.id, ROOT_INODE, name2)
        self.server.forget([(inode.id, 2)])

        inode = self.server.lookup(ROOT_INODE, name2)
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_write(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)

        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)

        # Overwriting the beginning must not change the file size
        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)

        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_failsafe(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.cache.clear()
        self.assertTrue(self.server.failsafe is False)

        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')

        # Modify contents
        with open(datafile, 'r+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)

        # Restore contents, but should be marked as damaged now
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # ..but not write access since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # ..ready only is fine.
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.server.read(fh, 0, len_)

        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_//2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 5, len_//2)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Don't call fsck, we're missing a block

    def test_create_open(self):
        name = self.newname()
        # Create a new file
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        # BUGFIX: forget() takes (inode-id, nlookup) pairs; the original
        # passed the inode object itself, unlike every other test here.
        self.server.forget([(inode.id, 1)])

        # Open it atomically
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.fsck()

    def test_edit(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)

        self.block_cache.clear()

        # Truncate and rewrite after the cache has been dropped
        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_copy_tree(self):
        ext_attr_name = 'system.foo.brazl'
        ext_attr_val = 'schulla dku woumm bramp'

        src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, 'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)
        self.server.setxattr(f1_inode.id, ext_attr_name, ext_attr_val)

        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, 'file2',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id, 'file2_hardlink')

        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, 'dir1', self.dir_mode(), Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, 'dir2', self.dir_mode(), Ctx())

        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id, 'file2_hardlink')

        # Replicate
        self.server.copy_tree(src_inode.id, dst_inode.id)

        # Change files (copies must not be affected)
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'new file1 contents')
        self.server.release(fh)

        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'new file2 contents')
        self.server.release(fh)

        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, 'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, 'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, 'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, 'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, 'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, 'file2_hardlink')

        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)
        self.assertEqual(self.server.getxattr(f1_inode_c.id, ext_attr_name),
                         ext_attr_val)

        # Check file2: all three hardlinks must point to one copied inode
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertTrue(self.server.read(fh, 0, 42) == 'file2 contents')
        self.server.release(fh)
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)

        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)

        self.server.forget(self.server.open_inodes.items())
        self.fsck()

    def test_copy_tree_2(self):
        src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())

        # Create file spanning two storage objects
        (fh, inode) = self.server.create(src_inode.id, 'file1',
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'block 1 contents')
        self.server.write(fh, self.max_obj_size, 'block 1 contents')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.server.copy_tree(src_inode.id, dst_inode.id)

        self.server.forget([(src_inode.id, 1), (dst_inode.id, 1)])
        self.fsck()

    def test_lock_tree(self):
        inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, 'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, 'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)

        # Another file outside the locked tree
        (fh, inode3) = self.server.create(ROOT_INODE, 'file1',
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        # Lock
        self.server.lock_tree(inode1.id)

        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)

        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, 'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, 'file1', inode1.id, 'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, 'file1', ROOT_INODE, 'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Open
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # Unlocked file must still be writable
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))

        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, 'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)

        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, 'dir1', self.dir_mode(), os.O_RDWR, Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Setattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setattr(inode2a.id, dict())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, 'name', 'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, 'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        self.server.forget(self.server.open_inodes.items())
        self.fsck()

    def test_remove_tree(self):
        inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, 'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, 'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)

        # Remove
        self.server.forget(self.server.open_inodes.items())
        self.server.remove_tree(ROOT_INODE, 'source')

        for (id_p, name) in ((ROOT_INODE, 'source'),
                             (inode1.id, 'file1'),
                             (inode1.id, 'dir1'),
                             (inode2.id, 'file2')):
            self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                             'WHERE name=? AND parent_inode = ?', (name, id_p)))

        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))

        self.fsck()