def test_get(self):
    attrs = {'mode': 784,
             'refcount': 3,
             'uid': 7,
             'gid': 2,
             'size': 34674,
             'rdev': 11,
             'atime_ns': time_ns(),
             'ctime_ns': time_ns(),
             'mtime_ns': time_ns()}

    inode = self.cache.create_inode(**attrs)
    for (key, val) in attrs.items():
        self.assertEqual(getattr(inode, key), val)

    # Create another inode
    self.cache.create_inode(**attrs)

    self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
    # Entry should still be in cache
    self.assertEqual(inode, self.cache[inode.id])

    # Now it should be out of the cache
    for _ in range(inode_cache.CACHE_SIZE + 1):
        self.cache.create_inode(**attrs)
    self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
def test_obj_refcounts(self):
    # One object shared by two blocks, but its refcount says one
    obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
    block_id_1 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) '
                               'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'foo')))
    block_id_2 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) '
                               'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'bar')))
    self.backend['s3ql_data_%d' % obj_id] = b'foo and bar'

    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                          "VALUES (?,?,?,?,?,?,?,?)",
                          (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                           os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 2048))
    self._link(b'test-entry', inode)
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (inode, 1, block_id_1))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (inode, 2, block_id_2))

    self.assert_fsck(self.fsck.check_objects_refcount)
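# The fsck tests in this section lean on a few helpers that are not part of
# this excerpt: sha256(), self._add_name(), self._link() and
# self.assert_fsck(). The definitions below are plausible sketches
# reconstructed from the call sites; the exact signatures and the
# NoSuchRowError exception are assumptions, not the project's actual code.

import hashlib

def sha256(s):
    # Raw digest bytes, as stored in the blocks.hash column
    return hashlib.sha256(s).digest()

def _add_name(self, name):
    # Return the id of *name* in the names table, inserting a row or
    # bumping the refcount as needed (hypothetical implementation)
    try:
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (name,))
        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
    except NoSuchRowError:
        name_id = self.db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
                                (name, 1))
    return name_id

def _link(self, name, inode, parent_inode=ROOT_INODE):
    # Create a directory entry *name* for *inode*
    self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                    (self._add_name(name), inode, parent_inode))

def assert_fsck(self, fn):
    # *fn* must detect at least one error; afterwards a full check must
    # come back clean, i.e. the error must also have been repaired.
    self.fsck.found_errors = False
    fn()
    self.assertTrue(self.fsck.found_errors)
    self.fsck.found_errors = False
    self.fsck.check()
    self.assertFalse(self.fsck.found_errors)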
def test_inodes_size(self):
    id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                         0, 0, time_ns(), time_ns(), time_ns(), 1, 128))
    self._link(b'test-entry', id_)

    obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
    block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) '
                             'VALUES(?,?,?,?)', (1, obj_id, 512, sha256(b'foo')))
    self.backend['s3ql_data_%d' % obj_id] = b'foo'

    # Case 1
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size + 120, id_))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 1, block_id))
    self.assert_fsck(self.fsck.check_inodes_size)

    # Case 2
    self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 0, block_id))
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
    self.assert_fsck(self.fsck.check_inodes_size)

    # Case 3
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 1, block_id))
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size + 120, id_))
    self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?', (block_id,))
    self.assert_fsck(self.fsck.check_inodes_size)
def test_wrong_block_refcount(self):
    obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 23)')
    self.backend['s3ql_data_%d' % obj_id] = b'foo'
    block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) '
                             'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'')))

    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                          "VALUES (?,?,?,?,?,?,?,?)",
                          (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                           os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(),
                           1, self.max_obj_size))
    self._link(b'test-entry', inode)

    # The block is referenced twice, but its refcount says one
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (inode, 0, block_id))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (inode, 1, block_id))

    self.assert_fsck(self.fsck.check_blocks_refcount)
def test_cache(self):
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                          "VALUES (?,?,?,?,?,?,?,?)",
                          (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                           | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                           os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 8))
    self._link(b'test-entry', inode)

    # Create new block
    with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
        fh.write(b'somedata')
    self.assert_fsck(self.fsck.check_cache)
    self.assertEqual(self.backend['s3ql_data_1'], b'somedata')

    # Existing block
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size + 8, inode))
    with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
        fh.write(b'somedata')
    self.assert_fsck(self.fsck.check_cache)

    # Old block preserved
    with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
        fh.write(b'somedat2')
    self.assert_fsck(self.fsck.check_cache)

    # Old block removed
    with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
        fh.write(b'somedat3')
    self.assert_fsck(self.fsck.check_cache)
def test_orphaned_inode(self):
    # Inode with refcount 1 but no directory entry pointing at it
    self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                  "VALUES (?,?,?,?,?,?,?,?)",
                  (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                   0, 0, time_ns(), time_ns(), time_ns(), 1, 0))
    self.assert_fsck(self.fsck.check_inodes_refcount)
def test_unix_blocks(self):
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                          "VALUES (?,?,?,?,?,?,?)",
                          (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
                           os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1))
    self._link(b'test-entry', inode)

    self.fsck.found_errors = False
    self.fsck.check_unix()
    self.assertFalse(self.fsck.found_errors)

    # A socket inode must not have data blocks
    obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
    block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                             (1, obj_id, 0))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (inode, 1, block_id))

    self.fsck.check_unix()
    self.assertTrue(self.fsck.found_errors)
def test_wrong_inode_refcount(self):
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                          "VALUES (?,?,?,?,?,?,?,?)",
                          (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                           0, 0, time_ns(), time_ns(), time_ns(), 1, 0))
    # Two directory entries, but the inode claims a refcount of one
    self._link(b'name1', inode)
    self._link(b'name2', inode)

    self.assert_fsck(self.fsck.check_inodes_refcount)
def test_unix_symlink_no_target(self):
    # Symlink inode without a corresponding symlink_targets row
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                          "VALUES (?,?,?,?,?,?,?)",
                          (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                           os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1))
    self._link(b'test-entry', inode)
    self.fsck.check_unix()
    self.assertTrue(self.fsck.found_errors)
def test_contents_name(self):
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                          "VALUES (?,?,?,?,?,?,?,?)",
                          (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                           0, 0, time_ns(), time_ns(), time_ns(), 1, 0))
    # name_id 42 has no matching row in the names table
    self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                    (42, inode, ROOT_INODE))
    self.assert_fsck(self.fsck.check_contents_name)
def test_inode_blocks_block_id(self):
    id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                         0, 0, time_ns(), time_ns(), time_ns(), 1, 128))
    # block_id 35 has no matching row in the blocks table
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (id_, 0, 35))
    self._link(b'test-entry', id_)
    self.assert_fsck(self.fsck.check_inode_blocks_block_id)
def test_name_refcount(self):
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                          "VALUES (?,?,?,?,?,?,?,?)",
                          (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                           0, 0, time_ns(), time_ns(), time_ns(), 2, 0))
    self._link(b'name1', inode)
    self._link(b'name2', inode)

    # Inflate the refcount of one name beyond its actual use count
    self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?', (b'name1',))
    self.assert_fsck(self.fsck.check_names_refcount)
def test_ext_attrs_name(self):
    id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                         0, 0, time_ns(), time_ns(), time_ns(), 1, 128))
    self._link(b'test-entry', id_)
    # name_id 34 has no matching row in the names table
    self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
                    (34, id_, b'some value'))
    self.assert_fsck(self.fsck.check_ext_attributes_name)
def test_del(self):
    attrs = {'mode': 784,
             'refcount': 3,
             'uid': 7,
             'gid': 2,
             'size': 34674,
             'rdev': 11,
             'atime_ns': time_ns(),
             'ctime_ns': time_ns(),
             'mtime_ns': time_ns()}
    inode = self.cache.create_inode(**attrs)
    del self.cache[inode.id]
    self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
    self.assertRaises(KeyError, self.cache.__delitem__, inode.id)
def test_missing_obj(self):
    # Object row exists, but the data was never stored in the backend
    obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
    block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                             (1, obj_id, 128))

    id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                         0, 0, time_ns(), time_ns(), time_ns(), 1, 128))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                    (id_, 0, block_id))
    self._link(b'test-entry', id_)

    self.assert_fsck(self.fsck.check_objects_id)
def test_unix_child(self):
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                          "VALUES (?,?,?,?,?,?,?)",
                          (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                           os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1))
    self._link(b'test-entry', inode)

    self.fsck.found_errors = False
    self.fsck.check_unix()
    self.assertFalse(self.fsck.found_errors)

    # A regular file must not have directory entries beneath it
    self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                    (self._add_name(b'foo'), ROOT_INODE, inode))
    self.fsck.check_unix()
    self.assertTrue(self.fsck.found_errors)
def test_unix_target(self):
    inode = 42
    self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                    "VALUES (?,?,?,?,?,?,?,?)",
                    (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
                     os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1))
    self._link(b'test-entry', inode)

    self.fsck.found_errors = False
    self.fsck.check_unix()
    self.assertFalse(self.fsck.found_errors)

    # Only symlinks may have a symlink target
    self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                    (inode, b'foo'))
    self.fsck.check_unix()
    self.assertTrue(self.fsck.found_errors)
def test_unix_rdev(self):
    inode = 42
    self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                    "VALUES (?,?,?,?,?,?,?,?)",
                    (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                     os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1))
    self._link(b'test-entry', inode)

    self.fsck.found_errors = False
    self.fsck.check_unix()
    self.assertFalse(self.fsck.found_errors)

    # Only device nodes may have a device number
    self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
    self.fsck.check_unix()
    self.assertTrue(self.fsck.found_errors)
def test_create(self):
    attrs = {'mode': 784,
             'refcount': 3,
             'uid': 7,
             'gid': 2,
             'size': 34674,
             'rdev': 11,
             'atime_ns': time_ns(),
             'ctime_ns': time_ns(),
             'mtime_ns': time_ns()}
    inode = self.cache.create_inode(**attrs)
    for (key, val) in attrs.items():
        self.assertEqual(val, getattr(inode, key))
    self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
def test_blocks_checksum(self):
    id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                         0, 0, time_ns(), time_ns(), time_ns(), 1, 8))
    self._link(b'test-entry', id_)

    # Assume that due to a crash we did not write the hash for the block
    self.backend['s3ql_data_4364'] = b'Testdata'
    self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)', (4364, 1, 8))
    block_id = self.db.rowid('INSERT INTO blocks (obj_id, refcount, size) VALUES(?, ?, ?)',
                             (4364, 1, 8))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 0, block_id))

    # Should pick up the missing hash and delete the block
    self.fsck.found_errors = False
    self.fsck.check_blocks_checksum()
    assert self.fsck.found_errors
    self.fsck.found_errors = False
    self.fsck.check_blocks_checksum()
    assert not self.fsck.found_errors

    # Should save the file in lost+found
    self.fsck.found_errors = False
    self.fsck.check()
    assert self.fsck.found_errors

    # Now everything should be good
    self.fsck.found_errors = False
    self.fsck.check()
    assert not self.fsck.found_errors

    assert not self.db.has_val('SELECT block_id FROM inode_blocks WHERE inode=?', (id_,))
    inode_p = self.db.get_val('SELECT parent_inode FROM contents_v WHERE inode=?', (id_,))
    lof_id = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                             (b"lost+found", ROOT_INODE))
    assert inode_p == lof_id
def test_loops(self):
    # Create some directory inodes
    inodes = [self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                            "VALUES (?,?,?,?,?,?,?)",
                            (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time_ns(), time_ns(), time_ns(), 1))
              for dummy in range(3)]

    # Chain them into a cycle: each directory contains the next,
    # and the last one contains the first again
    inodes.append(inodes[0])
    last = inodes[0]
    for inode in inodes[1:]:
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                        (self._add_name(str(inode).encode()), inode, last))
        last = inode

    self.assert_fsck(self.fsck.check_loops)
def test_unix_size_symlink(self):
    inode = 42
    target = b'some funny random string'
    self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                    "VALUES (?,?,?,?,?,?,?,?,?)",
                    (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                     os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, len(target)))
    self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, target))
    self._link(b'test-entry', inode)

    self.fsck.found_errors = False
    self.fsck.check_unix()
    self.assertFalse(self.fsck.found_errors)

    # A symlink's size must equal the length of its target
    self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
    self.fsck.check_unix()
    self.assertTrue(self.fsck.found_errors)
@pytest.fixture  # assumed: the excerpt does not show how ctx() is registered
def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(
        lambda: local.Backend(Namespace(storage_url='local://' + ctx.backend_dir)))
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually during cleanup below.
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode,
         stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(ctx.backend_pool, ctx.db,
                       ctx.cachedir + "/cache", ctx.max_obj_size * 100)
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True

    cache.to_upload = DummyDistributor()

    # Tested methods assume that they are called from
    # file system request handler
    s3ql.block_cache.lock = MockLock()
    s3ql.block_cache.lock_released = MockLock()

    try:
        yield ctx
    finally:
        ctx.cache.backend_pool = ctx.backend_pool
        ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
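# For orientation, a test consumes ctx like any other pytest fixture. The
# sketch below assumes that BlockCache.get(inode, blockno) is a context
# manager yielding a file-like cache entry, which matches how the fixture
# wires the cache up; treat the exact API as an assumption of this sketch.

def test_write_then_read(ctx):
    data = b'foo'
    with ctx.cache.get(ctx.inode, 0) as fh:
        fh.seek(0)
        fh.write(data)
    with ctx.cache.get(ctx.inode, 0) as fh:
        fh.seek(0)
        assert fh.read(len(data)) == data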
@pytest.fixture  # assumed: run under pytest-trio; the excerpt does not show the decorator
async def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(
        lambda: local.Backend(Namespace(storage_url='local://' + ctx.backend_dir)))
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually during cleanup below.
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode,
         stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(ctx.backend_pool, ctx.db,
                       ctx.cachedir + "/cache", ctx.max_obj_size * 100)
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    try:
        yield ctx
    finally:
        ctx.cache.backend_pool = ctx.backend_pool
        if ctx.cache.destroy is not None:
            await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
def test_unix_nomode_reg(self):
    # Mode has permission bits but no file type; fsck should default to S_IFREG
    perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
    stamp = time_ns()
    inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                          "VALUES (?,?,?,?,?,?,?)",
                          (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
    self._link(b'test-entry', inode)
    self.assert_fsck(self.fsck.check_unix)

    newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,))
    self.assertEqual(stat.S_IMODE(newmode), perms)
    self.assertEqual(stat.S_IFMT(newmode), stat.S_IFREG)
def setUp(self):
    self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    self.backend_pool = BackendPool(
        lambda: local.Backend('local://' + self.backend_dir, None, None))
    self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Create an inode we can work with
    self.inode = 42
    now_ns = time_ns()
    self.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (self.inode,
         stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(self.backend_pool, self.db,
                       self.cachedir + "/cache", self.max_obj_size * 100)
    self.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True

    cache.to_upload = DummyDistributor()

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()
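# DummyQueue and MockLock are referenced by the fixtures above but are not
# included in this excerpt. The stand-ins below capture the intent inferred
# from the call sites -- removal jobs run synchronously and the global lock
# is a no-op -- but the method bodies, including the _do_removal() call, are
# assumptions, not the project's actual definitions.

class DummyQueue:
    '''Hand queued removal jobs straight back to the cache'''

    def __init__(self, cache):
        self.cache = cache

    def put(self, arg, timeout=None):
        # Hypothetical: process the job immediately instead of queueing it
        self.cache._do_removal(arg)
        return True

class MockLock:
    '''No-op replacement for the file system request lock'''

    def acquire(self, timeout=None):
        pass

    def release(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        return False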
def test_inodes_size(self):
    id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                         0, 0, time_ns(), time_ns(), time_ns(), 1, 128))
    self._link(b'test-entry', id_)

    block_size = self.max_obj_size // 3
    obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
    block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) '
                             'VALUES(?,?,?,?)', (1, obj_id, block_size, sha256(b'foo')))
    self.backend['s3ql_data_%d' % obj_id] = b'foo'

    # One block, no holes, size plausible
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (block_size, id_))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 0, block_id))
    self.fsck.found_errors = False
    self.fsck.check()
    assert not self.fsck.found_errors

    # One block, size not plausible
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (block_size - 1, id_))
    self.assert_fsck(self.fsck.check_inodes_size)

    # Two blocks, hole at the beginning, size plausible
    self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
    self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                    (self.max_obj_size + block_size, id_))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 1, block_id))
    self.fsck.found_errors = False
    self.fsck.check()
    assert not self.fsck.found_errors

    # Two blocks, no holes, size plausible
    self.db.execute('UPDATE blocks SET refcount = 2 WHERE id = ?', (block_id,))
    self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                    (id_, 0, block_id))
    self.fsck.found_errors = False
    self.fsck.check()
    assert not self.fsck.found_errors

    # Two blocks, size not plausible
    self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                    (self.max_obj_size + block_size - 1, id_))
    self.assert_fsck(self.fsck.check_inodes_size)

    # Two blocks, hole at the end, size plausible
    self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                    (self.max_obj_size + block_size + 1, id_))
    self.fsck.found_errors = False
    self.fsck.check()
    assert not self.fsck.found_errors

    # Two blocks, size not plausible
    self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size, id_))
    self.assert_fsck(self.fsck.check_inodes_size)