def runTest(self):
    """Check that fsck renumbers inodes that have grown beyond 2**31.

    Seeds the ``inodes`` autoincrement sequence with a value above 2**31,
    copies a reference tree into the mounted file system, runs fsck, and
    verifies that all inode numbers are back below 2**31 while the file
    system contents are unchanged (compared via a dry-run rsync).
    """
    skip_without_rsync()
    ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
    try:
        populate_dir(ref_dir)

        # Make file system and fake high inode number
        self.mkfs()
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2**31 + 10, 'inodes'))
        db.close()

        # Copy source data
        self.mount()
        subprocess.check_call(
            ['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/'])
        self.umount()

        # Check that inode watermark is high
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertGreater(
            db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                       ('inodes', )), 2**31 + 10)
        self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'),
                           2**31 + 10)
        db.close()

        # Renumber inodes
        self.fsck()

        # Check if renumbering was done
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertLess(
            db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                       ('inodes', )), 2**31)
        self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2**31)
        db.close()

        # Compare: a dry-run (-n) itemized (-i) rsync prints a line for
        # every difference, so any output at all means the copy diverged.
        self.mount()
        try:
            out = check_output([
                'rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                ref_dir + '/', self.mnt_dir + '/'
            ],
                               universal_newlines=True,
                               stderr=subprocess.STDOUT)
        except CalledProcessError as exc:
            self.fail('rsync failed with ' + exc.output)
        if out:
            self.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(ref_dir)
def runTest(self):
    """Check that fsck renumbers inodes that have grown beyond 2**31.

    Seeds the ``inodes`` autoincrement sequence with a value above 2**31,
    copies a reference tree into the mounted file system, runs fsck, and
    verifies that all inode numbers are back below 2**31 while the file
    system contents are unchanged (compared via a dry-run rsync).

    Skipped if rsync is not installed.
    """
    try:
        # DEVNULL instead of open('/dev/null', 'wb'): the original leaked
        # the file handle (it was never closed).
        subprocess.call(['rsync', '--version'],
                        stderr=subprocess.STDOUT,
                        stdout=subprocess.DEVNULL)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            raise unittest.SkipTest('rsync not installed')
        raise

    ref_dir = tempfile.mkdtemp()
    try:
        populate_dir(ref_dir)

        # Make file system and fake high inode number
        self.mkfs()
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2 ** 31 + 10, 'inodes'))
        db.close()

        # Copy source data
        self.mount()
        subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                               self.mnt_dir + '/'])
        self.umount()

        # Check that inode watermark is high
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertGreater(
            db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                       ('inodes',)), 2 ** 31 + 10)
        self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'),
                           2 ** 31 + 10)
        db.close()

        # Renumber inodes
        self.fsck()

        # Check if renumbering was done
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertLess(
            db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                       ('inodes',)), 2 ** 31)
        self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31)
        db.close()

        # Compare. universal_newlines=True makes communicate() return str
        # rather than bytes -- the original code raised TypeError
        # ('str' + bytes) in self.fail() whenever rsync produced output.
        self.mount()
        rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
                                  '--exclude', '/lost+found',
                                  ref_dir + '/', self.mnt_dir + '/'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 universal_newlines=True)
        out = rsync.communicate()[0]
        # Check the exit status first: a *failed* rsync run also produces
        # output, which the original ordering misreported as a content
        # mismatch instead of an rsync error.
        if rsync.returncode != 0:
            self.fail('rsync failed with ' + out)
        elif out:
            self.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(ref_dir)
def test(self):
    """Check that fsck renumbers inodes that have grown beyond 2**31.

    pytest-style variant: seeds the ``inodes`` autoincrement sequence with
    a value above 2**31, copies a reference tree into the mounted file
    system, runs fsck, and verifies that all inode numbers are back below
    2**31 while the contents are unchanged (compared via dry-run rsync).
    """
    skip_without_rsync()
    ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
    try:
        populate_dir(ref_dir)

        # Make file system and fake high inode number
        self.mkfs()
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2 ** 31 + 10, 'inodes'))
        db.close()

        # Copy source data
        self.mount()
        subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                               self.mnt_dir + '/'])
        self.umount()

        # Check that inode watermark is high
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                          ('inodes',)) > 2 ** 31 + 10
        assert db.get_val('SELECT MAX(id) FROM inodes') > 2 ** 31 + 10
        db.close()

        # Renumber inodes
        self.fsck()

        # Check if renumbering was done
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                          ('inodes',)) < 2 ** 31
        assert db.get_val('SELECT MAX(id) FROM inodes') < 2 ** 31
        db.close()

        # Compare: dry-run itemized rsync prints a line per difference,
        # so any output means the copy diverged from the reference tree.
        self.mount()
        try:
            out = check_output(['rsync', '-anciHAX', '--delete',
                                '--exclude', '/lost+found',
                                ref_dir + '/', self.mnt_dir + '/'],
                               universal_newlines=True,
                               stderr=subprocess.STDOUT)
        except CalledProcessError as exc:
            pytest.fail('rsync failed with ' + exc.output)
        if out:
            pytest.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(ref_dir)
class fsck_tests(unittest.TestCase):
    """Unit tests for the individual Fsck consistency checks.

    Each test corrupts the freshly initialized metadata database (and/or
    the local backend / cache directory) in a specific way and then uses
    assert_fsck() to verify that the corresponding ``check_*`` method both
    detects the problem and repairs it.
    """

    def setUp(self):
        # Fresh local backend, cache directory and metadata DB per test.
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend = local.Backend(
            Namespace(storage_url='local://' + self.backend_dir))
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        # Tests corrupt metadata on purpose, so errors are expected.
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        self.dbfile.close()

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''
        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        # A full check afterwards must come back clean, i.e. the error
        # was actually repaired, not just reported.
        self.fsck.found_errors = False
        self.fsck.check()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 8))
        self._link(b'test-entry', inode)

        # Create new block
        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
        fh.write(b'somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEqual(self.backend['s3ql_data_1'], b'somedata')

        # Existing block
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 8, inode))
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write(b'somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
            fh.write(b'somedat2')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write(b'somedat3')
        self.assert_fsck(self.fsck.check_cache)

    def test_lof1(self):
        # Make lost+found a file
        inode = self.db.get_val(
            "SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
            (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?',
                        (inode, ))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        def check():
            self.fsck.check_lof()
            self.fsck.check_inodes_refcount()

        self.assert_fsck(check)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                  (b'lost+found', ))
        inode = self.db.get_val(
            'SELECT inode FROM contents WHERE name_id=? AND '
            'parent_inode=?', (name_id, ROOT_INODE))
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode, ))
        self.db.execute(
            'DELETE FROM contents WHERE name_id=? and parent_inode=?',
            (name_id, ROOT_INODE))
        self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?',
                        (name_id, ))
        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):
        # Two directory entries, but refcount recorded as 1.
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 0))
        self._link(b'name1', inode)
        self._link(b'name2', inode)
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_orphaned_inode(self):
        # Inode without any directory entry referring to it.
        self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 0))
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_name_refcount(self):
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 2, 0))
        self._link(b'name1', inode)
        self._link(b'name2', inode)
        # Artificially inflate one name's refcount.
        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?',
                        (b'name1', ))
        self.assert_fsck(self.fsck.check_names_refcount)

    def test_orphaned_name(self):
        # Name with a refcount but no referencing entry.
        self._add_name(b'zupbrazl')
        self.assert_fsck(self.fsck.check_names_refcount)

    def test_contents_inode(self):
        # Directory entry pointing at a non-existing inode (124).
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(b'foobar'), 124, ROOT_INODE))
        self.assert_fsck(self.fsck.check_contents_inode)

    def test_contents_inode_p(self):
        # Directory entry whose parent inode (123) does not exist.
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 0))
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(b'foobar'), inode, 123))
        self.assert_fsck(self.fsck.check_contents_parent_inode)

    def test_contents_name(self):
        # Directory entry referring to a non-existing name id (42).
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 0))
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (42, inode, ROOT_INODE))
        self.assert_fsck(self.fsck.check_contents_name)

    def _add_name(self, name):
        '''Get id for *name* and increase refcount

        Name is inserted in table if it does not yet exist.
        '''
        try:
            name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                      (name, ))
        except NoSuchRowError:
            name_id = self.db.rowid(
                'INSERT INTO names (name, refcount) VALUES(?,?)', (name, 1))
        else:
            self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?',
                            (name_id, ))
        return name_id

    def _link(self, name, inode, parent_inode=ROOT_INODE):
        '''Link /*name* to *inode*'''
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(name), inode, parent_inode))

    def test_inodes_size(self):
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 128))
        self._link(b'test-entry', id_)

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 512, sha256(b'foo')))
        self.backend['s3ql_data_%d' % obj_id] = b'foo'

        # Case 1
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 1, block_id))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 2
        self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_, ))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 0, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 3
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 1, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute(
            'UPDATE blocks SET refcount = refcount + 1 WHERE id = ?',
            (block_id, ))
        self.assert_fsck(self.fsck.check_inodes_size)

    def test_objects_id(self):
        # Create an object that only exists in the backend
        self.backend['s3ql_data_4364'] = b'Testdata'
        self.assert_fsck(self.fsck.check_objects_id)

        # Create an object that does not exist in the backend
        self.db.execute(
            'INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
            (34, 1, 27))
        self.assert_fsck(self.fsck.check_objects_id)

    def test_blocks_checksum(self):
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 8))
        self._link(b'test-entry', id_)

        # Assume that due to a crash we did not write the hash for the block
        self.backend['s3ql_data_4364'] = b'Testdata'
        self.db.execute(
            'INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
            (4364, 1, 8))
        # NOTE(review): uses db.execute() here rather than db.rowid() as in
        # the sibling tests -- verify that execute() really returns the new
        # row id for this Connection wrapper.
        block_id = self.db.execute(
            'INSERT INTO blocks (obj_id, refcount, size) VALUES(?, ?, ?)',
            (4364, 1, 8))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 0, block_id))

        # Should pick up wrong hash and delete objects
        self.fsck.found_errors = False
        self.fsck.check_blocks_checksum()
        assert self.fsck.found_errors
        self.fsck.found_errors = False
        self.fsck.check_blocks_checksum()
        assert not self.fsck.found_errors

        # Should save files in lost+found
        self.fsck.found_errors = False
        self.fsck.check()
        assert self.fsck.found_errors

        # Now everything should be good
        self.fsck.found_errors = False
        self.fsck.check()
        assert not self.fsck.found_errors

        assert not self.db.has_val(
            'SELECT block_id FROM inode_blocks WHERE inode=?', (id_, ))
        inode_p = self.db.get_val(
            'SELECT parent_inode FROM contents_v WHERE inode=?', (id_, ))
        lof_id = self.db.get_val(
            "SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
            (b"lost+found", ROOT_INODE))
        assert inode_p == lof_id

    def test_blocks_obj_id(self):
        # Block referring to a non-existing object (48).
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, 48, 128))
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 128))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (id_, 0, block_id))
        self._link(b'test-entry', id_)
        self.assert_fsck(self.fsck.check_blocks_obj_id)

    def test_missing_obj(self):
        # Object row exists, but no corresponding backend object was stored.
        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 128))
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 128))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (id_, 0, block_id))
        self._link(b'test-entry', id_)
        self.assert_fsck(self.fsck.check_objects_id)

    def test_inode_blocks_inode(self):
        # inode_blocks entry referring to a non-existing inode (27).
        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 42)')
        self.backend['s3ql_data_%d' % obj_id] = b'foo'
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 34))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (27, 0, block_id))
        self.assert_fsck(self.fsck.check_inode_blocks_inode)

    def test_inode_blocks_block_id(self):
        # inode_blocks entry referring to a non-existing block (35).
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 128))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (id_, 0, 35))
        self._link(b'test-entry', id_)
        self.assert_fsck(self.fsck.check_inode_blocks_block_id)

    def test_symlinks_inode(self):
        # Symlink target attached to a non-existing inode (42).
        self.db.execute(
            'INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
            (42, b'somewhere else'))
        self.assert_fsck(self.fsck.check_symlinks_inode)

    def test_ext_attrs_inode(self):
        # Extended attribute attached to a non-existing inode (34).
        self.db.execute(
            'INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
            (self._add_name(b'some name'), 34, b'some value'))
        self.assert_fsck(self.fsck.check_ext_attributes_inode)

    def test_ext_attrs_name(self):
        # Extended attribute referring to a non-existing name id (34).
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time_ns(), time_ns(), time_ns(), 1, 128))
        self._link(b'test-entry', id_)
        self.db.execute(
            'INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
            (34, id_, b'some value'))
        self.assert_fsck(self.fsck.check_ext_attributes_name)

    @staticmethod
    def random_data(len_):
        # Return *len_* bytes of random data.
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def test_loops(self):
        # Create some directory inodes
        inodes = [
            self.db.rowid(
                "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                "VALUES (?,?,?,?,?,?,?)",
                (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
                 time_ns(), time_ns(), time_ns(), 1)) for dummy in range(3)
        ]

        # Chain the directories into each other and close the cycle by
        # appending the first inode again.
        inodes.append(inodes[0])
        last = inodes[0]
        for inode in inodes[1:]:
            self.db.execute(
                'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                (self._add_name(str(inode).encode()), inode, last))
            last = inode

        self.assert_fsck(self.fsck.check_loops)

    def test_tmpfile(self):
        # Ensure that path exists
        objname = 's3ql_data_38375'
        self.backend[objname] = b'bla'
        del self.backend[objname]
        path = self.backend._key_to_path(objname)
        tmpname = '%s#%d-%d.tmp' % (path, os.getpid(), _thread.get_ident())
        with open(tmpname, 'wb') as fh:
            fh.write(b'Hello, world')
        self.assert_fsck(self.fsck.check_objects_temp)

    def test_obj_refcounts(self):
        # Object referenced by two blocks but recorded with refcount 1.
        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 42)')
        block_id_1 = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'foo')))
        block_id_2 = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'bar')))
        self.backend['s3ql_data_%d' % obj_id] = b'foo and bar'

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1, 2048))
        self._link(b'test-entry', inode)
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id_1))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 2, block_id_2))
        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_orphaned_obj(self):
        # Object without any block referring to it.
        self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 33)')
        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_wrong_block_refcount(self):
        # Block used twice by the same inode but recorded with refcount 1.
        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 23)')
        self.backend['s3ql_data_%d' % obj_id] = b'foo'
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'')))

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1,
             self.max_obj_size))
        self._link(b'test-entry', inode)
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 0, block_id))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id))
        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_orphaned_block(self):
        # Block without any inode_blocks entry referring to it.
        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 24)')
        self.backend['s3ql_data_%d' % obj_id] = b'foo'
        self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) VALUES(?,?,?,?)',
            (1, obj_id, 3, sha256(b'xyz')))
        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_unix_size(self):
        # A FIFO must have size zero.
        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1, 0))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode, ))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_size_symlink(self):
        # A symlink's size must equal the length of its target.
        inode = 42
        target = b'some funny random string'
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1, len(target)))
        self.db.execute(
            'INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
            (inode, target))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode, ))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_target(self):
        # Only symlinks may have a symlink target.
        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute(
            'INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
            (inode, 'foo'))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_nomode_reg(self):
        # Inode whose mode has permission bits but no file-type bits;
        # fsck should infer a regular file.
        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time_ns()
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        self._link(b'test-entry', inode)
        self.assert_fsck(self.fsck.check_unix)

        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?',
                                  (inode, ))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFREG)

    def test_unix_nomode_dir(self):
        # Inode without file-type bits that has a child entry;
        # fsck should infer a directory.
        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time_ns()
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        inode2 = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (perms | stat.S_IFREG, os.getuid(), os.getgid(), stamp, stamp,
             stamp, 1))
        self._link(b'test-entry', inode)
        self._link(b'subentry', inode2, inode)
        self.assert_fsck(self.fsck.check_unix)

        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?',
                                  (inode, ))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFDIR)

    def test_unix_symlink_no_target(self):
        # Symlink inode without a symlink_targets entry.
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_rdev(self):
        # Only device nodes may have a device number.
        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_child(self):
        # Only directories may have child entries.
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(b'foo'), ROOT_INODE, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_blocks(self):
        # Only regular files may have data blocks.
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 0))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)
class fs_api_tests(unittest.TestCase): def setUp(self): self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') plain_backend = local.Backend('local://' + self.backend_dir, None, None) self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend)) self.backend = self.backend_pool.pop_conn() self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-') self.max_obj_size = 1024 # Destructors are not guaranteed to run, and we can't unlink # the file immediately because apsw refers to it by name. # Therefore, we unlink the file manually in tearDown() self.dbfile = tempfile.NamedTemporaryFile(delete=False) self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) # Tested methods assume that they are called from # file system request handler llfuse.lock.acquire() cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache", self.max_obj_size * 5) self.block_cache = cache self.server = fs.Operations(cache, self.db, self.max_obj_size, InodeCache(self.db, 0)) self.server.init() # Monkeypatch around the need for removal and upload threads cache.to_remove = DummyQueue(cache) class DummyDistributor: def put(self, arg, timeout=None): cache._do_upload(*arg) return True cache.to_upload = DummyDistributor() # Keep track of unused filenames self.name_cnt = 0 def tearDown(self): self.server.inodes.destroy() llfuse.lock.release() self.block_cache.destroy() shutil.rmtree(self.cachedir) shutil.rmtree(self.backend_dir) os.unlink(self.dbfile.name) self.dbfile.close() @staticmethod def random_data(len_): with open("/dev/urandom", "rb") as fd: return fd.read(len_) def fsck(self): self.block_cache.clear() self.server.inodes.flush() fsck = Fsck(self.cachedir + '/cache', self.backend, { 'max_obj_size': self.max_obj_size }, self.db) fsck.check() self.assertFalse(fsck.found_errors) def newname(self): self.name_cnt += 1 return ("s3ql_%d" % self.name_cnt).encode() def test_getattr_root(self): 
self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode)) self.fsck() def test_create(self): ctx = Ctx() mode = self.dir_mode() name = self.newname() inode_p_old = self.server.getattr(ROOT_INODE).copy() safe_sleep(CLOCK_GRANULARITY) self.server._create(ROOT_INODE, name, mode, ctx) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)) inode = self.server.getattr(id_) self.assertEqual(inode.mode, mode) self.assertEqual(inode.uid, ctx.uid) self.assertEqual(inode.gid, ctx.gid) self.assertEqual(inode.refcount, 1) self.assertEqual(inode.size, 0) inode_p_new = self.server.getattr(ROOT_INODE) self.assertGreater(inode_p_new.mtime, inode_p_old.mtime) self.assertGreater(inode_p_new.ctime, inode_p_old.ctime) self.server.forget([(id_, 1)]) self.fsck() def test_extstat(self): # Test with zero contents self.server.extstat() # Test with empty file (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.server.extstat() # Test with data in file fh = self.server.open(inode.id, os.O_RDWR) self.server.write(fh, 0, b'foobar') self.server.release(fh) self.server.extstat() self.server.forget([(inode.id, 1)]) self.fsck() @staticmethod def dir_mode(): return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR @staticmethod def file_mode(): return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG def test_getxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.assertRaises(FUSEError, self.server.getxattr, inode.id, b'nonexistant-attr') self.server.setxattr(inode.id, b'my-attr', b'strabumm!') self.assertEqual(self.server.getxattr(inode.id, b'my-attr'), b'strabumm!') self.server.forget([(inode.id, 1)]) self.fsck() def test_link(self): name = self.newname() inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) 
        # --- continuation of test_link(): the 'def' line and the creation of
        # 'name' / 'inode_p_new' lie in an earlier part of the file ---
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        # CLOCK_GRANULARITY sleep so the ctime/mtime comparisons below can
        # observe a strictly increasing timestamp.
        safe_sleep(CLOCK_GRANULARITY)
        inode_before = self.server.getattr(inode.id).copy()
        self.server.link(inode.id, inode_p_new.id, name)

        inode_after = self.server.lookup(inode_p_new.id, name)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        # The new directory entry must point at the linked inode...
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id))
        self.assertEqual(inode_before.id, id_)
        # ...whose link count grew and whose ctime was bumped; the parent
        # directory's mtime/ctime must have been updated as well.
        self.assertEqual(inode_after.refcount, 2)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1), (inode_after.id, 1)])
        self.fsck()

    def test_listxattr(self):
        """listxattr() must report exactly the extended attributes set so far."""
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertListEqual([], self.server.listxattr(inode.id))
        self.server.setxattr(inode.id, b'key1', b'blub')
        self.assertListEqual([b'key1'], self.server.listxattr(inode.id))
        self.server.setxattr(inode.id, b'key2', b'blub')
        self.assertListEqual(sorted([b'key1', b'key2']),
                             sorted(self.server.listxattr(inode.id)))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_read(self):
        """Data written at an offset must read back correctly and bump atime.

        The region before the written data must read back as zeros (sparse
        region), and a read starting in the second object must return the
        tail of the data.
        """
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, off, data)
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.assertTrue(self.server.read(fh, off, len_) == data)
        inode_after = self.server.getattr(inode.id)
        self.assertGreater(inode_after.atime, inode_before.atime)
        self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
        self.assertTrue(self.server.read(fh, self.max_obj_size, len_) == data[off:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_readdir(self):
        """readdir() must return every entry, both in one pass and piecewise."""
        # Create a few entries
        names = [('entry_%2d' % i).encode() for i in range(20)]
        for name in names:
            (fh, inode) = self.server.create(ROOT_INODE, name,
                                             self.file_mode(), os.O_RDWR, Ctx())
            self.server.release(fh)
            self.server.forget([(inode.id, 1)])

        # Delete some to make sure that we don't have continuous rowids
        remove_no = [0, 2, 3, 5, 9]
        for i in remove_no:
            self.server.unlink(ROOT_INODE, names[i])
            del names[i]

        # Read all
        fh = self.server.opendir(ROOT_INODE)
        self.assertListEqual(sorted(names + [b'lost+found']),
                             sorted(x[0] for x in self.server.readdir(fh, 0)))
        self.server.releasedir(fh)

        # Read in parts: restart readdir() from the last offset after every
        # three entries and make sure nothing is skipped or duplicated.
        fh = self.server.opendir(ROOT_INODE)
        entries = list()
        try:
            next_ = 0
            while True:
                gen = self.server.readdir(fh, next_)
                for _ in range(3):
                    (name, _, next_) = next(gen)
                    entries.append(name)
        except StopIteration:
            pass

        self.assertListEqual(sorted(names + [b'lost+found']), sorted(entries))
        self.server.releasedir(fh)
        self.fsck()

    def test_forget(self):
        """An unlinked inode must survive until the last lookup is forgotten."""
        name = self.newname()

        # Test that entries are deleted when they're no longer referenced
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'foobar')
        self.server.unlink(ROOT_INODE, name)
        # Directory entry is gone immediately...
        self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        # ...but the inode itself persists while still looked-up.
        self.assertTrue(self.server.getattr(inode.id).id)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.fsck()

    def test_removexattr(self):
        """removexattr() must fail for missing keys and delete existing ones."""
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.removexattr, inode.id, b'some name')
        self.server.setxattr(inode.id, b'key1', b'blub')
        self.server.removexattr(inode.id, b'key1')
        self.assertListEqual([], self.server.listxattr(inode.id))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_rename(self):
        """rename() must move the entry and update both parents' timestamps."""
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)

        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id == name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_file(self):
        """Renaming over an existing file must destroy the replaced inode."""
        oldname = self.newname()
        newname = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, oldname,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        # Target file that will be overwritten by the rename
        (fh, inode2) = self.server.create(inode_p_new.id, newname,
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, b'test_xattr', b'42*8')
        self.server.forget([(inode2.id, 1)])

        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        # The replaced inode must have been deleted from the database.
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_dir(self):
        """Renaming over an existing (empty) directory must destroy it."""
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        # Target directory that will be overwritten by the rename
        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())
        self.server.forget([(inode2.id, 1)])

        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
        self.fsck()

    def test_setattr(self):
        """setattr() must apply exactly the non-None attributes and bump ctime."""
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0o641,
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()

        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2 ** 32)
        # st_gid deliberately left at None: it must stay unchanged.
        attr.st_gid = None
        attr.st_atime = randint(0, 2 ** 32) / 10 ** 6
        attr.st_mtime = randint(0, 2 ** 32) / 10 ** 6

        safe_sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)

        for key in attr.__slots__:
            if key in ('st_mode', 'st_uid', 'st_atime', 'st_mtime'):
                # Explicitly set attributes must have the new value...
                self.assertEqual(getattr(attr, key), getattr(inode_new, key),
                                 '%s mismatch' % key)
            elif key != 'st_ctime':
                # ...everything else (except ctime) must be untouched.
                self.assertEqual(getattr(inode_old, key), getattr(inode_new, key),
                                 '%s mismatch' % key)

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate(self):
        """Shrinking then growing via st_size must zero-fill the extension."""
        len_ = int(2.7 * self.max_obj_size)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)

        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        attr.st_size = len_
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_)
                        == data[:len_ // 2] + b'\0' * (len_ // 2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate_0(self):
        """Truncating to zero and rewriting must work after an inode flush."""
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        self.server.inodes.flush()

        fh = self.server.open(inode.id, os.O_RDWR)
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_setxattr(self):
        """A value stored with setxattr() must read back via getxattr()."""
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, b'my-attr', b'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, b'my-attr'), b'strabumm!')
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_names(self):
        """Reusing a name as an xattr key must keep the names table consistent."""
        name1 = self.newname()
        name2 = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name1,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        (fh, inode) = self.server.create(ROOT_INODE, name2,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        # fsck after every mutation to check name refcounting each time.
        self.server.setxattr(inode.id, name1, b'strabumm!')
        self.fsck()

        self.server.removexattr(inode.id, name1)
        self.fsck()

        self.server.setxattr(inode.id, name1, b'strabumm karacho!!')
        self.server.unlink(ROOT_INODE, name1)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_statfs(self):
        """statfs() must not fail on an empty, sparse, or populated filesystem."""
        # Test with zero contents
        self.server.statfs()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.statfs()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'foobar')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.server.statfs()

    def test_symlink(self):
        """symlink() must store the target and update the parent's timestamps."""
        target = self.newname()
        name = self.newname()

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertEqual(target, self.server.readlink(inode.id))

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_unlink(self):
        """unlink() of a forgotten inode must delete entry, inode and xattrs."""
        name = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)

        # Add extended attributes
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')
        self.server.forget([(inode.id, 1)])

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_rmdir(self):
        """rmdir() must remove entry and inode, and touch the parent."""
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
        self.server.forget([(inode.id, 1)])
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_relink(self):
        """An unlinked but still-open inode can be linked back into the tree."""
        name = self.newname()
        name2 = self.newname()
        data = b'some data to deal with'

        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        self.server.unlink(ROOT_INODE, name)
        self.server.inodes.flush()
        # Entry gone, inode still alive (kept by the outstanding lookup)...
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
        # ...so it can be given a new name.
        self.server.link(inode.id, ROOT_INODE, name2)

        self.server.forget([(inode.id, 2)])
        inode = self.server.lookup(ROOT_INODE, name2)
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_write(self):
        """write() must extend the file, bump mtime/ctime, and keep size stable
        when overwriting within the existing extent."""
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)

        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)

        # Overwriting inside the file must not change its size.
        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)

        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_failsafe(self):
        """Corrupted or lost backend data must raise EIO and trip failsafe mode.

        In failsafe mode, opening for write must fail with EPERM while
        read-only access keeps working.
        """
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        # Force data out of the cache so the next read hits the backend.
        self.server.cache.clear()
        self.assertTrue(self.server.failsafe is False)

        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')

        # Modify contents
        with open(datafile, 'rb+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend returned malformed data for',
                              count=1, level=logging.ERROR):
                self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)

        # Restore contents, but should be marked as damaged now
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # ..but not write access since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # ..read-only is fine.
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.server.read(fh, 0, len_)

        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_//2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend lost block',
                              count=1, level=logging.ERROR):
                self.server.read(fh, 5, len_//2)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Don't call fsck, we're missing a block

    def test_create_open(self):
        """create() on an existing name must atomically open the file."""
        name = self.newname()
        # Create a new file
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        # NOTE(review): every other test passes inode.id to forget(); passing
        # the inode object itself (here and below) looks unintended -- confirm.
        self.server.forget([(inode, 1)])

        # Open it atomically
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode, 1)])
        self.fsck()

    def test_edit(self):
        """Truncate-to-zero followed by a rewrite must survive a cache clear."""
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)

        self.block_cache.clear()

        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_copy_tree(self):
        """copy_tree() must deep-copy files, xattrs, subdirs and hardlinks,
        giving the copies fresh inodes that are independent of the originals."""
        ext_attr_name = b'system.foo.brazl'
        ext_attr_val = b'schulla dku woumm bramp'

        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, b'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)
        self.server.setxattr(f1_inode.id, ext_attr_name, ext_attr_val)

        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, b'file2',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id, b'file2_hardlink')

        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, b'dir1', self.dir_mode(), Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, b'dir2', self.dir_mode(), Ctx())

        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id, b'file2_hardlink')

        # Replicate
        self.server.copy_tree(src_inode.id, dst_inode.id)

        # Change files (after the copy, so the copies must keep the old data)
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file1 contents')
        self.server.release(fh)

        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file2 contents')
        self.server.release(fh)

        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, b'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, b'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, b'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, b'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, b'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, b'file2_hardlink')

        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), b'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)
        self.assertEqual(self.server.getxattr(f1_inode_c.id, ext_attr_name),
                         ext_attr_val)

        # Check file2
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertTrue(self.server.read(fh, 0, 42) == b'file2 contents')
        self.server.release(fh)
        # All three hardlinks must map to one copied inode with refcount 3.
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)

        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_copy_tree_2(self):
        """copy_tree() must handle a file with two blocks of identical data."""
        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, inode) = self.server.create(src_inode.id, b'file1',
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'block 1 contents')
        self.server.write(fh, self.max_obj_size, b'block 1 contents')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.server.copy_tree(src_inode.id, dst_inode.id)

        self.server.forget([(src_inode.id, 1), (dst_inode.id, 1)])
        self.fsck()

    def test_lock_tree(self):
        """Every mutating operation on a locked subtree must fail with EPERM,
        while entries outside the subtree stay writable."""
        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)

        # Another file (outside the locked subtree)
        (fh, inode3) = self.server.create(ROOT_INODE, b'file1',
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        # Lock
        self.server.lock_tree(inode1.id)

        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)

        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, b'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, b'file1', inode1.id, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, b'file1', ROOT_INODE, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Open
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # The unlocked file outside the subtree may still be opened for write.
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))

        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, b'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)

        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, b'dir1', self.dir_mode(), os.O_RDWR, Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Setattr (the dict() stand-in is never inspected -- the lock check
        # must fire before the attributes are used)
        with self.assertRaises(FUSEError) as cm:
            self.server.setattr(inode2a.id, dict())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, b'name', b'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, b'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_remove_tree(self):
        """remove_tree() must recursively delete all entries and inodes."""
        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)

        # Remove
        self.server.forget(list(self.server.open_inodes.items()))
        self.server.remove_tree(ROOT_INODE, b'source')

        for (id_p, name) in ((ROOT_INODE, b'source'),
                             (inode1.id, b'file1'),
                             (inode1.id, b'dir1'),
                             (inode2.id, b'file2')):
            self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                             'WHERE name=? AND parent_inode = ?', (name, id_p)))

        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))

        self.fsck()
class fs_api_tests(unittest.TestCase): def setUp(self): self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') plain_backend = local.Backend('local://' + self.backend_dir, None, None) self.backend_pool = BackendPool( lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend)) self.backend = self.backend_pool.pop_conn() self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-') self.max_obj_size = 1024 # Destructors are not guaranteed to run, and we can't unlink # the file immediately because apsw refers to it by name. # Therefore, we unlink the file manually in tearDown() self.dbfile = tempfile.NamedTemporaryFile(delete=False) self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) # Tested methods assume that they are called from # file system request handler llfuse.lock.acquire() cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache", self.max_obj_size * 5) self.block_cache = cache self.server = fs.Operations(cache, self.db, self.max_obj_size, InodeCache(self.db, 0)) self.server.init() # Monkeypatch around the need for removal and upload threads cache.to_remove = DummyQueue(cache) class DummyDistributor: def put(self, arg, timeout=None): cache._do_upload(*arg) return True cache.to_upload = DummyDistributor() # Keep track of unused filenames self.name_cnt = 0 def tearDown(self): self.server.inodes.destroy() llfuse.lock.release() self.block_cache.destroy() shutil.rmtree(self.cachedir) shutil.rmtree(self.backend_dir) os.unlink(self.dbfile.name) self.dbfile.close() @staticmethod def random_data(len_): with open("/dev/urandom", "rb") as fd: return fd.read(len_) def fsck(self): self.block_cache.clear() self.server.inodes.flush() fsck = Fsck(self.cachedir + '/cache', self.backend, {'max_obj_size': self.max_obj_size}, self.db) fsck.check() self.assertFalse(fsck.found_errors) def newname(self): self.name_cnt += 1 return ("s3ql_%d" % self.name_cnt).encode() def test_getattr_root(self): 
self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode)) self.fsck() def test_create(self): ctx = Ctx() mode = self.dir_mode() name = self.newname() inode_p_old = self.server.getattr(ROOT_INODE).copy() safe_sleep(CLOCK_GRANULARITY) self.server._create(ROOT_INODE, name, mode, ctx) id_ = self.db.get_val( 'SELECT inode FROM contents JOIN names ON name_id = names.id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)) inode = self.server.getattr(id_) self.assertEqual(inode.mode, mode) self.assertEqual(inode.uid, ctx.uid) self.assertEqual(inode.gid, ctx.gid) self.assertEqual(inode.refcount, 1) self.assertEqual(inode.size, 0) inode_p_new = self.server.getattr(ROOT_INODE) self.assertGreater(inode_p_new.mtime, inode_p_old.mtime) self.assertGreater(inode_p_new.ctime, inode_p_old.ctime) self.server.forget([(id_, 1)]) self.fsck() def test_extstat(self): # Test with zero contents self.server.extstat() # Test with empty file (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.server.extstat() # Test with data in file fh = self.server.open(inode.id, os.O_RDWR) self.server.write(fh, 0, b'foobar') self.server.release(fh) self.server.extstat() self.server.forget([(inode.id, 1)]) self.fsck() @staticmethod def dir_mode(): return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR @staticmethod def file_mode(): return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG def test_getxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.assertRaises(FUSEError, self.server.getxattr, inode.id, b'nonexistant-attr') self.server.setxattr(inode.id, b'my-attr', b'strabumm!') self.assertEqual(self.server.getxattr(inode.id, b'my-attr'), b'strabumm!') self.server.forget([(inode.id, 1)]) self.fsck() def test_link(self): name = self.newname() inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) 
inode_p_new_before = self.server.getattr(inode_p_new.id).copy() (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) safe_sleep(CLOCK_GRANULARITY) inode_before = self.server.getattr(inode.id).copy() self.server.link(inode.id, inode_p_new.id, name) inode_after = self.server.lookup(inode_p_new.id, name) inode_p_new_after = self.server.getattr(inode_p_new.id) id_ = self.db.get_val( 'SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id)) self.assertEqual(inode_before.id, id_) self.assertEqual(inode_after.refcount, 2) self.assertGreater(inode_after.ctime, inode_before.ctime) self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime) self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime) self.server.forget([(inode.id, 1), (inode_p_new.id, 1), (inode_after.id, 1)]) self.fsck() def test_listxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.assertListEqual([], self.server.listxattr(inode.id)) self.server.setxattr(inode.id, b'key1', b'blub') self.assertListEqual([b'key1'], self.server.listxattr(inode.id)) self.server.setxattr(inode.id, b'key2', b'blub') self.assertListEqual(sorted([b'key1', b'key2']), sorted(self.server.listxattr(inode.id))) self.server.forget([(inode.id, 1)]) self.fsck() def test_read(self): len_ = self.max_obj_size data = self.random_data(len_) off = self.max_obj_size // 2 (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.write(fh, off, data) inode_before = self.server.getattr(inode.id).copy() safe_sleep(CLOCK_GRANULARITY) self.assertTrue(self.server.read(fh, off, len_) == data) inode_after = self.server.getattr(inode.id) self.assertGreater(inode_after.atime, inode_before.atime) self.assertTrue( self.server.read(fh, 0, len_) == b"\0" * off + data[:off]) 
self.assertTrue( self.server.read(fh, self.max_obj_size, len_) == data[off:]) self.server.release(fh) self.server.forget([(inode.id, 1)]) self.fsck() def test_readdir(self): # Create a few entries names = [('entry_%2d' % i).encode() for i in range(20)] for name in names: (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.server.forget([(inode.id, 1)]) # Delete some to make sure that we don't have continous rowids remove_no = [0, 2, 3, 5, 9] for i in remove_no: self.server.unlink(ROOT_INODE, names[i]) del names[i] # Read all fh = self.server.opendir(ROOT_INODE) self.assertListEqual(sorted(names + [b'lost+found']), sorted(x[0] for x in self.server.readdir(fh, 0))) self.server.releasedir(fh) # Read in parts fh = self.server.opendir(ROOT_INODE) entries = list() try: next_ = 0 while True: gen = self.server.readdir(fh, next_) for _ in range(3): (name, _, next_) = next(gen) entries.append(name) except StopIteration: pass self.assertListEqual(sorted(names + [b'lost+found']), sorted(entries)) self.server.releasedir(fh) self.fsck() def test_forget(self): name = self.newname() # Test that entries are deleted when they're no longer referenced (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, Ctx()) self.server.write(fh, 0, b'foobar') self.server.unlink(ROOT_INODE, name) self.assertFalse( self.db.has_val( 'SELECT 1 FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE))) self.assertTrue(self.server.getattr(inode.id).id) self.server.release(fh) self.server.forget([(inode.id, 1)]) self.assertFalse( self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id, ))) self.fsck() def test_removexattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, Ctx()) self.server.release(fh) self.assertRaises(FUSEError, self.server.removexattr, inode.id, b'some name') self.server.setxattr(inode.id, b'key1', b'blub') self.server.removexattr(inode.id, b'key1') self.assertListEqual([], self.server.listxattr(inode.id)) self.server.forget([(inode.id, 1)]) self.fsck() def test_rename(self): oldname = self.newname() newname = self.newname() inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx()) inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx()) inode_p_new_before = self.server.getattr(inode_p_new.id).copy() inode_p_old_before = self.server.getattr(ROOT_INODE).copy() safe_sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname) inode_p_old_after = self.server.getattr(ROOT_INODE) inode_p_new_after = self.server.getattr(inode_p_new.id) self.assertFalse( self.db.has_val( 'SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val( 'SELECT inode FROM contents JOIN names ON names.id == name_id ' 'WHERE name=? 
AND parent_inode = ?', (newname, inode_p_new.id))
        # Tail of the preceding rename test (its beginning lies outside this
        # chunk): the renamed entry must resolve to the original inode, and
        # both parent directories must carry updated time stamps.
        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_file(self):
        '''Rename over an existing file; the replaced inode must be destroyed.'''
        oldname = self.newname()
        newname = self.newname()
        (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')
        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        # Target file that will be overwritten by the rename
        (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(),
                                          os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, b'test_xattr', b'42*8')
        self.server.forget([(inode2.id, 1)])
        # Make sure the mtime/ctime comparisons below see a clock tick
        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
        # The overwritten inode must be gone (refcount dropped to zero)
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id, )))
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_dir(self):
        '''Rename over an existing (empty) directory; target must be destroyed.'''
        oldname = self.newname()
        newname = self.newname()
        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())
        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())
        self.server.forget([(inode2.id, 1)])
        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id, )))
        self.fsck()

    def test_setattr(self):
        '''setattr() must apply every set attribute and bump ctime.'''
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0o641,
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()
        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2**32)
        attr.st_gid = randint(0, 2**32)
        attr.st_atime = randint(0, 2**32) / 10**6
        attr.st_mtime = randint(0, 2**32) / 10**6
        safe_sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)
        # Only attributes that were actually set (non-None slots) are checked
        for key in attr.__slots__:
            if getattr(attr, key) is not None:
                self.assertEqual(getattr(attr, key), getattr(inode_new, key))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate(self):
        '''Shrink then re-extend a multi-block file; extension must zero-fill.'''
        len_ = int(2.7 * self.max_obj_size)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        attr.st_size = len_
        self.server.setattr(inode.id, attr)
        self.assertTrue(
            self.server.read(fh, 0, len_) == data[:len_ // 2] + b'\0' * (len_ // 2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate_0(self):
        '''Truncate to zero after a cache flush, then write again.'''
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        # Force inode state out to the database before truncating
        self.server.inodes.flush()
        fh = self.server.open(inode.id, os.O_RDWR)
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_setxattr(self):
        '''Round-trip a single extended attribute.'''
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.setxattr(inode.id, b'my-attr', b'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, b'my-attr'), b'strabumm!')
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_names(self):
        '''Exercise the shared names table via xattr names and unlink.'''
        name1 = self.newname()
        name2 = self.newname()
        (fh, inode) = self.server.create(ROOT_INODE, name1, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        (fh, inode) = self.server.create(ROOT_INODE, name2, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        # name1 is now referenced both as a dentry and as an xattr name
        self.server.setxattr(inode.id, name1, b'strabumm!')
        self.fsck()
        self.server.removexattr(inode.id, name1)
        self.fsck()
        self.server.setxattr(inode.id, name1, b'strabumm karacho!!')
        self.server.unlink(ROOT_INODE, name1)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_statfs(self):
        '''statfs() must work with empty fs, empty file, and real data.'''
        # Test with zero contents
        self.server.statfs()
        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.statfs()
        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'foobar')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.server.statfs()

    def test_symlink(self):
        '''Symlink creation: readlink round-trip plus parent timestamp update.'''
        target = self.newname()
        name = self.newname()
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)
        self.assertEqual(target, self.server.readlink(inode.id))
        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))
        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_unlink(self):
        '''Unlink of the only link must delete dentry and inode.'''
        name = self.newname()
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)
        # Add extended attributes
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')
        self.server.forget([(inode.id, 1)])
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id, )))
        self.fsck()

    def test_rmdir(self):
        '''rmdir must delete dentry and inode and touch the parent.'''
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
        self.server.forget([(inode.id, 1)])
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id, )))
        self.fsck()

    def test_relink(self):
        '''An unlinked-but-open inode must survive and be re-linkable.'''
        name = self.newname()
        name2 = self.newname()
        data = b'some data to deal with'
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        self.server.unlink(ROOT_INODE, name)
        self.server.inodes.flush()
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        # Inode itself must still exist — kernel still holds a lookup count
        self.assertTrue(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id, )))
        self.server.link(inode.id, ROOT_INODE, name2)
        self.server.forget([(inode.id, 2)])
        inode = self.server.lookup(ROOT_INODE, name2)
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_write(self):
        '''Writing must update mtime/ctime and track size correctly.'''
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)
        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)
        # Overwriting earlier data must not change the size
        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_failsafe(self):
        '''Corrupted backend data must trip failsafe mode (EIO, then read-only).'''
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.cache.clear()
        self.assertTrue(self.server.failsafe is False)
        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')
        # Modify contents
        with open(datafile, 'rb+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend returned malformed data for',
                              count=1, level=logging.ERROR):
                self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)
        # Restore contents, but should be marked as damaged now
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        # ..but not write access since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # ..ready only is fine.
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.server.read(fh, 0, len_)
        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_ // 2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend lost block',
                              count=1, level=logging.ERROR):
                self.server.read(fh, 5, len_ // 2)
        self.assertEqual(cm.exception.errno, errno.EIO)
        # Don't call fsck, we're missing a block

    def test_create_open(self):
        '''create() on an existing name must open it atomically.'''
        name = self.newname()
        # Create a new file
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        # NOTE(review): every other test passes (inode.id, 1) to forget();
        # here the inode object itself is passed — confirm this is intended.
        self.server.forget([(inode, 1)])
        # Open it atomically
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode, 1)])
        self.fsck()

    def test_edit(self):
        '''Truncate-then-rewrite of a flushed file.'''
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        # NOTE(review): other tests use self.server.cache.clear(); this one
        # uses self.block_cache — presumably the same cache object, confirm.
        self.block_cache.clear()
        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_copy_tree(self):
        '''copy_tree() must deep-copy files, xattrs, dirs, and hardlinks.'''
        ext_attr_name = b'system.foo.brazl'
        ext_attr_val = b'schulla dku woumm bramp'
        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), Ctx())
        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, b'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)
        self.server.setxattr(f1_inode.id, ext_attr_name, ext_attr_val)
        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, b'file2',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id, b'file2_hardlink')
        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, b'dir1', self.dir_mode(), Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, b'dir2', self.dir_mode(), Ctx())
        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id, b'file2_hardlink')
        # Replicate
        self.server.copy_tree(src_inode.id, dst_inode.id)
        # Change files (copies must remain unaffected)
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file1 contents')
        self.server.release(fh)
        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file2 contents')
        self.server.release(fh)
        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, b'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, b'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, b'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, b'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, b'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, b'file2_hardlink')
        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), b'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)
        self.assertEqual(self.server.getxattr(f1_inode_c.id, ext_attr_name),
                         ext_attr_val)
        # Check file2
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertTrue(self.server.read(fh, 0, 42) == b'file2 contents')
        self.server.release(fh)
        # All three hardlinks must map to one copied inode
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)
        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_copy_tree_2(self):
        '''copy_tree() of a sparse multi-block file.'''
        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), Ctx())
        # Create file
        (fh, inode) = self.server.create(src_inode.id, b'file1',
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'block 1 contents')
        self.server.write(fh, self.max_obj_size, b'block 1 contents')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.server.copy_tree(src_inode.id, dst_inode.id)
        self.server.forget([(src_inode.id, 1), (dst_inode.id, 1)])
        self.fsck()

    def test_lock_tree(self):
        '''lock_tree() must make every mutating op on the subtree fail with EPERM.'''
        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)
        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)
        # Another file (outside the locked tree)
        (fh, inode3) = self.server.create(ROOT_INODE, b'file1',
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        # Lock
        self.server.lock_tree(inode1.id)
        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)
        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, b'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, b'file1', inode1.id, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, b'file1', ROOT_INODE, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # Open
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # The unlocked file must still open for writing
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))
        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, b'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)
        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, b'dir1', self.dir_mode(),
                                os.O_RDWR, Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # Setattr
        with self.assertRaises(FUSEError) as cm:
            # NOTE(review): dict() instead of llfuse.EntryAttributes() — works
            # only because setattr rejects the locked inode before reading attrs.
            self.server.setattr(inode2a.id, dict())
        self.assertEqual(cm.exception.errno, errno.EPERM)
        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, b'name', b'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, b'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_remove_tree(self):
        '''remove_tree() must recursively delete all dentries and inodes.'''
        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)
        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)
        # Remove
        self.server.forget(list(self.server.open_inodes.items()))
        self.server.remove_tree(ROOT_INODE, b'source')
        for (id_p, name) in ((ROOT_INODE, b'source'), (inode1.id, b'file1'),
                             (inode1.id, b'dir1'), (inode2.id, b'file2')):
            self.assertFalse(
                self.db.has_val(
                    'SELECT inode FROM contents JOIN names ON names.id = name_id '
                    'WHERE name=? AND parent_inode = ?', (name, id_p)))
        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(
                self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_, )))
        self.fsck()
class fsck_tests(TestCase):
    '''Unit tests for the individual Fsck check_* passes.

    Each test plants a specific inconsistency directly in the metadata
    database (or cache/bucket) and uses assert_fsck() to verify that the
    corresponding check detects it on the first run and has repaired it
    by the second run.

    NOTE(review): this class uses Python-2-era idioms (assertEquals, str
    values written to files opened in 'wb' mode) while other parts of the
    file use b'' literals — confirm which interpreter this chunk targets.
    '''

    def setUp(self):
        # Fresh local bucket, cache dir and metadata DB for every test
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket = local.Bucket(self.bucket_dir, None, None)
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.fsck = Fsck(self.cachedir, self.bucket,
                         { 'blocksize': self.blocksize }, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''
        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        # A second run must find nothing — the error was repaired
        self.fsck.found_errors = False
        fn()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        '''check_cache must commit stray cache files and handle conflicts.'''
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        # Create new block
        # NOTE(review): str payloads into a 'wb' file — Python-2 era; on
        # Python 3 these would need to be bytes.
        fh = open(self.cachedir + '%d-1' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')
        # This should be ignored
        fh = open(self.cachedir + '%d-1' % inode, 'wb')
        fh.write('otherdata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')
        # Existing block
        with open(self.cachedir + '%d-2' % inode, 'wb') as fh:
            fh.write('somedata')
        self.assert_fsck(self.fsck.check_cache)
        # Old block preserved
        with open(self.cachedir + '%d-1' % inode, 'wb') as fh:
            fh.write('overwriting somedata')
        self.assert_fsck(self.fsck.check_cache)
        # Old block removed
        with open(self.cachedir + '%d-2' % inode, 'wb') as fh:
            fh.write('overwriting last piece of somedata')
        self.assert_fsck(self.fsck.check_cache)

    def test_lof1(self):
        '''check_lof must recreate lost+found if it is not a directory.'''
        # Make lost+found a file
        inode = self.db.get_val(
            "SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
            (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))
        self.assert_fsck(self.fsck.check_lof)

    def test_lof2(self):
        '''check_lof must recreate lost+found if it is missing entirely.'''
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                  (b'lost+found',))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))
        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):
        '''Inode refcount disagreeing with its dentry count must be repaired.'''
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time.time(), time.time(), time.time(), 1, 0))
        # Two links, but refcount says 1
        self._link('name1', inode)
        self._link('name2', inode)
        self.assert_fsck(self.fsck.check_inode_refcount)

    def test_orphaned_inode(self):
        '''An inode with no dentry at all must be detected.'''
        self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time.time(), time.time(), time.time(), 1, 0))
        self.assert_fsck(self.fsck.check_inode_refcount)

    def test_name_refcount(self):
        '''Wrong refcount in the names table must be repaired.'''
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time.time(), time.time(), time.time(), 1, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?',
                        ('name1',))
        self.assert_fsck(self.fsck.check_name_refcount)

    def test_orphaned_name(self):
        '''A name with no referencing dentry or xattr must be removed.'''
        self._add_name('zupbrazl')
        self.assert_fsck(self.fsck.check_name_refcount)

    def test_ref_integrity(self):
        '''A dentry pointing to a non-existent inode must be flagged.'''
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name('foobar'), 124, ROOT_INODE))
        self.fsck.found_errors = False
        self.fsck.check_foreign_keys()
        self.assertTrue(self.fsck.found_errors)

    def _add_name(self, name):
        '''Get id for *name* and increase refcount

        Name is inserted in table if it does not yet exist.
        '''
        try:
            name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                      (name,))
        except NoSuchRowError:
            name_id = self.db.rowid(
                'INSERT INTO names (name, refcount) VALUES(?,?)', (name, 1))
        else:
            self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?',
                            (name_id,))
        return name_id

    def _link(self, name, inode):
        '''Link /*name* to *inode*'''
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(name), inode, ROOT_INODE))

    def test_inode_sizes(self):
        '''check_inode_sizes must fix all block-layout/size mismatches.'''
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time.time(), time.time(), time.time(), 1, 128))
        self._link('test-entry', id_)
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 512))
        # Case 1: multi-block layout but no inline block_id
        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
                        (None, self.blocksize + 120, id_))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 1, block_id))
        self.assert_fsck(self.fsck.check_inode_sizes)
        # Case 2: inline block but size too small
        self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
                        (block_id, 129, id_))
        self.assert_fsck(self.fsck.check_inode_sizes)
        # Case 3: inline block AND inode_blocks rows at once
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 1, block_id))
        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
                        (block_id, self.blocksize + 120, id_))
        self.assert_fsck(self.fsck.check_inode_sizes)

    def test_keylist(self):
        '''Bucket keys and objects table must be reconciled in both directions.'''
        # Create an object that only exists in the bucket
        self.bucket['s3ql_data_4364'] = 'Testdata'
        self.assert_fsck(self.fsck.check_keylist)
        # Create an object that does not exist in the bucket
        self.db.execute('INSERT INTO objects (id, refcount) VALUES(?, ?)',
                        (34, 1))
        self.assert_fsck(self.fsck.check_keylist)

    def test_missing_obj(self):
        '''A referenced object missing from the bucket must be detected.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 128))
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size,block_id) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
             time.time(), time.time(), time.time(), 1, 128, block_id))
        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_keylist)

    @staticmethod
    def random_data(len_):
        # Raw random bytes of the requested length
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def test_loops(self):
        '''check_loops must detect a directory cycle (no auto-repair yet).'''
        # Create some directory inodes
        inodes = [ self.db.rowid(
                       "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                       "VALUES (?,?,?,?,?,?,?)",
                       (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0,
                        time.time(), time.time(), time.time(), 1))
                   for dummy in range(3) ]
        inodes.append(inodes[0])
        last = inodes[0]
        for inode in inodes[1:]:
            # NOTE(review): bytes(inode) as dentry name — on Python 3 this
            # yields a zero-filled buffer, not the decimal string; confirm
            # the intended interpreter/semantics.
            self.db.execute(
                'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                (self._add_name(bytes(inode)), inode, last))
            last = inode
        self.fsck.found_errors = False
        self.fsck.check_inode_refcount()
        self.assertFalse(self.fsck.found_errors)
        self.fsck.check_loops()
        self.assertTrue(self.fsck.found_errors)
        # We can't fix loops yet

    def test_obj_refcounts(self):
        '''Wrong object refcount must be repaired.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        self.db.execute(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (0, obj_id, 0))
        self.db.execute(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (0, obj_id, 0))
        self.assert_fsck(self.fsck.check_obj_refcounts)

    def test_orphaned_obj(self):
        '''An object referenced by no block must be removed.'''
        self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        self.assert_fsck(self.fsck.check_obj_refcounts)

    def test_wrong_block_refcount(self):
        '''Block refcount disagreeing with its references must be repaired.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 0))
        # Block referenced both inline (inodes.block_id) and via inode_blocks,
        # but refcount says 1
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size,block_id) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(),
             time.time(), time.time(), time.time(), 1, 0, block_id))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id))
        self.assert_fsck(self.fsck.check_block_refcount)

    def test_orphaned_block(self):
        '''A block referenced by no inode must be removed.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 0))
        self.assert_fsck(self.fsck.check_block_refcount)

    def test_unix_size(self):
        '''Non-regular files (here: FIFO) must not have a non-zero size.'''
        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_size_symlink(self):
        '''Symlink size must equal the length of its target.'''
        inode = 42
        target = 'some funny random string'
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1,
             len(target)))
        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                        (inode, target))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_target(self):
        '''Only symlinks may have a symlink target.'''
        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                        (inode, 'foo'))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_symlink_no_target(self):
        '''A symlink inode without a target row must be flagged.'''
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_rdev(self):
        '''Only device nodes may have a non-zero rdev.'''
        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_child(self):
        '''Only directories may have directory entries.'''
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name('foo'), ROOT_INODE, inode))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_blocks(self):
        '''Only regular files may have data blocks.'''
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 0))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)
def upgrade(bucket):
    '''Upgrade file system to newest revision.

    *bucket* is the storage backend holding the file system. Raises
    `QuietError` when the file system is mounted elsewhere, damaged, or at
    a revision that cannot be upgraded from; prints a confirmation prompt
    before touching any data.
    '''

    # Access to protected member
    #pylint: disable=W0212

    log.info('Getting file system parameters..')
    seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]

    # BUGFIX: test for emptiness *before* calling max() -- max() raises
    # ValueError on an empty sequence, which used to mask this error message.
    if not seq_nos:
        raise QuietError(textwrap.dedent('''
            File system revision too old to upgrade!

            You need to use an older S3QL version to upgrade to a more recent
            revision before you can use this version to upgrade to the newest
            revision.
            '''))
    seq_no = max(seq_nos)

    param = bucket.lookup('s3ql_metadata')

    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            raise QuietError(textwrap.fill(textwrap.dedent('''\
                It appears that the file system is still mounted somewhere else. If this is not
                the case, the file system may have not been unmounted cleanly and you should try
                to run fsck on the computer where the file system has been mounted most recently.
                ''')))
        else:
            raise QuietError(textwrap.fill(textwrap.dedent('''\
                It appears that the file system is still mounted somewhere else. If this is not the
                case, the file system may have not been unmounted cleanly or the data from the
                most-recent mount may have not yet propagated through the backend. In the later case,
                waiting for a while should fix the problem, in the former case you should try to run
                fsck on the computer where the file system has been mounted most recently.
                ''')))

    # Check that the fs itself is clean
    if param['needs_fsck']:
        raise QuietError("File system damaged, run fsck!")

    # Check revision: we can only upgrade from the immediately preceding one
    if param['revision'] < CURRENT_FS_REV - 1:
        raise QuietError(textwrap.dedent('''
            File system revision too old to upgrade!

            You need to use an older S3QL version to upgrade to a more recent
            revision before you can use this version to upgrade to the newest
            revision.
            '''))
    elif param['revision'] >= CURRENT_FS_REV:
        print('File system already at most-recent revision')
        return

    print(textwrap.dedent('''
        I am about to update the file system to the newest revision.
        You will not be able to access the file system with any older version
        of S3QL after this operation.

        You should make very sure that this command is not interrupted and
        that no one else tries to mount, fsck or upgrade the file system at
        the same time.
        '''))

    print('Please enter "yes" to continue.', '> ', sep='\n', end='')
    if sys.stdin.readline().strip().lower() != 'yes':
        raise QuietError()

    log.info('Upgrading from revision %d to %d...', CURRENT_FS_REV - 1, CURRENT_FS_REV)

    if 's3ql_hash_check_status' not in bucket:
        # First pass: convert legacy local-bucket storage layout (merge the
        # separate .meta/.dat file pairs into single objects).
        if (isinstance(bucket, LegacyLocalBucket)
            or (isinstance(bucket, BetterBucket)
                and isinstance(bucket.bucket, LegacyLocalBucket))):
            if isinstance(bucket, LegacyLocalBucket):
                bucketpath = bucket.name
            else:
                bucketpath = bucket.bucket.name
            for (path, _, filenames) in os.walk(bucketpath, topdown=True):
                for name in filenames:
                    if not name.endswith('.meta'):
                        continue
                    basename = os.path.splitext(name)[0]
                    # '=00' would collide with the escaping scheme used below
                    if '=00' in basename:
                        raise RuntimeError("No, seriously, you tried to break things, didn't you?")
                    # Append the data file to the metadata file
                    with open(os.path.join(path, name), 'r+b') as dst:
                        dst.seek(0, os.SEEK_END)
                        with open(os.path.join(path, basename + '.dat'), 'rb') as src:
                            shutil.copyfileobj(src, dst)
                    basename = basename.replace('#', '=23')
                    os.rename(os.path.join(path, name),
                              os.path.join(path, basename))
                    os.unlink(os.path.join(path, basename + '.dat'))
            if isinstance(bucket, LegacyLocalBucket):
                bucket = LocalBucket(bucket.name, None, None)
            else:
                bucket.bucket = LocalBucket(bucket.bucket.name, None, None)

        # Download metadata (legacy format)
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_legacy_metadata(tmp, db)

        # Increase metadata sequence no and expire old markers
        param['seq_no'] += 1
        bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
        for i in seq_nos:
            if i < param['seq_no'] - 5:
                del bucket['s3ql_seq_no_%d' % i ]

        log.info("Uploading database..")
        cycle_metadata(bucket)
        param['last-modified'] = time.time() - time.timezone
        with bucket.open_write("s3ql_metadata", param) as fh:
            dump_metadata(fh, db)
    else:
        # Hash check already started earlier: metadata is in the new format
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_metadata(tmp, db)

    print(textwrap.dedent('''
        The following process may take a long time, but can be interrupted
        with Ctrl-C and resumed from this point by calling `s3qladm upgrade`
        again. Please see Changes.txt for why this is necessary.
        '''))

    if 's3ql_hash_check_status' not in bucket:
        log.info("Starting hash verification..")
        start_obj = 0
    else:
        start_obj = int(bucket['s3ql_hash_check_status'])
        log.info("Resuming hash verification with object %d..", start_obj)

    # BUGFIX: pre-initialize so the KeyboardInterrupt handler below does not
    # hit a NameError if the interrupt arrives before the first iteration.
    obj_id = start_obj
    try:
        total = db.get_val('SELECT COUNT(id) FROM objects')
        i = 0
        for (obj_id, hash_) in db.query('SELECT obj_id, hash FROM blocks JOIN objects '
                                        'ON obj_id == objects.id WHERE obj_id > ? '
                                        'ORDER BY obj_id ASC', (start_obj,)):
            if i % 100 == 0:
                log.info(' ..checked %d/%d objects..', i, total)

            # Re-hash the stored object and compare with the recorded hash
            sha = hashlib.sha256()
            with bucket.open_read("s3ql_data_%d" % obj_id) as fh:
                while True:
                    buf = fh.read(128*1024)
                    if not buf:
                        break
                    sha.update(buf)

            if sha.digest() != hash_:
                log.warn('Object %d corrupted! Deleting..', obj_id)
                bucket.delete('s3ql_data_%d' % obj_id)
            i += 1
    except KeyboardInterrupt:
        # Record progress so a later `s3qladm upgrade` can resume here
        log.info("Storing verification status...")
        bucket['s3ql_hash_check_status'] = '%d' % obj_id
        raise QuietError('Aborting..')

    log.info('Running fsck...')
    fsck = Fsck(tempfile.mkdtemp(), bucket, param, db)
    fsck.check()

    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")

    param['revision'] = CURRENT_FS_REV
    param['seq_no'] += 1
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'

    log.info("Uploading database..")
    cycle_metadata(bucket)
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as fh:
        dump_metadata(fh, db)
class fs_api_tests(TestCase):
    '''Exercise the fs.Operations request handlers against an in-memory
    metadata database and a local bucket, verifying both the returned
    attributes and the resulting database state (via fsck after each test).
    '''

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.bucket = self.bucket_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                      self.blocksize * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0

    def tearDown(self):
        self.server.destroy()
        self.block_cache.destroy()

        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        # Return len_ bytes of random test data
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def fsck(self):
        # Flush caches and assert that the resulting fs state is consistent
        self.block_cache.clear()
        self.server.inodes.flush()
        fsck = Fsck(self.cachedir, self.bucket,
                    { 'blocksize': self.blocksize }, self.db)
        fsck.check()
        self.assertFalse(fsck.found_errors)

    def newname(self):
        # Return a filename that has not been used in this test yet
        self.name_cnt += 1
        return "s3ql_%d" % self.name_cnt

    def test_getattr_root(self):
        self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
        self.fsck()

    def test_create(self):
        ctx = Ctx()
        mode = self.dir_mode()
        name = self.newname()

        inode_p_old = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server._create(ROOT_INODE, name, mode, ctx)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        inode = self.server.getattr(id_)

        self.assertEqual(inode.mode, mode)
        self.assertEqual(inode.uid, ctx.uid)
        self.assertEqual(inode.gid, ctx.gid)
        self.assertEqual(inode.refcount, 1)
        self.assertEqual(inode.size, 0)

        inode_p_new = self.server.getattr(ROOT_INODE)

        # Creating an entry must bump the parent's mtime and ctime
        self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
        self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)

        self.fsck()

    def test_extstat(self):
        # Test with zero contents
        self.server.extstat()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)
        self.server.extstat()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'foobar')
        self.server.release(fh)
        self.server.extstat()

        self.fsck()

    @staticmethod
    def dir_mode():
        # Random permission bits combined with the directory file type
        return (randint(0, 07777) & ~stat.S_IFDIR) | stat.S_IFDIR

    @staticmethod
    def file_mode():
        # Random permission bits combined with the regular-file type
        return (randint(0, 07777) & ~stat.S_IFREG) | stat.S_IFREG

    def test_getxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.getxattr, inode.id,
                          'nonexistant-attr')

        self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')

        self.fsck()

    def test_link(self):
        name = self.newname()

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)
        time.sleep(CLOCK_GRANULARITY)

        inode_before = self.server.getattr(inode.id).copy()
        self.server.link(inode.id, inode_p_new.id, name)

        inode_after = self.server.lookup(inode_p_new.id, name)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id))

        self.assertEqual(inode_before.id, id_)
        self.assertEqual(inode_after.refcount, 2)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)

        self.fsck()

    def test_listxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.assertListEqual([], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, 'key1', 'blub')
        self.assertListEqual(['key1'], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, 'key2', 'blub')
        self.assertListEqual(sorted(['key1', 'key2']),
                             sorted(self.server.listxattr(inode.id)))

        self.fsck()

    def test_read(self):
        len_ = self.blocksize
        data = self.random_data(len_)
        off = self.blocksize // 2

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.write(fh, off, data)
        inode_before = self.server.getattr(inode.id).copy()
        time.sleep(CLOCK_GRANULARITY)

        self.assertTrue(self.server.read(fh, off, len_) == data)
        inode_after = self.server.getattr(inode.id)
        # Reading must update atime
        self.assertGreater(inode_after.atime, inode_before.atime)
        # The hole before the written data reads back as zero bytes
        self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
        self.assertTrue(self.server.read(fh, self.blocksize, len_) == data[off:])
        self.server.release(fh)

        self.fsck()

    def test_readdir(self):
        # Create a few entries
        names = [ 'entry_%2d' % i for i in range(20) ]
        for name in names:
            (fh, _) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), Ctx())
            self.server.release(fh)

        # Delete some to make sure that we don't have continous rowids
        remove_no = [0, 2, 3, 5, 9]
        for i in remove_no:
            self.server.unlink(ROOT_INODE, names[i])
            del names[i]

        # Read all
        fh = self.server.opendir(ROOT_INODE)
        self.assertListEqual(sorted(names + ['lost+found']) ,
                             sorted(x[0] for x in self.server.readdir(fh, 0)))
        self.server.releasedir(fh)

        # Read in parts (restart readdir from the last returned offset)
        fh = self.server.opendir(ROOT_INODE)
        entries = list()
        try:
            next_ = 0
            while True:
                gen = self.server.readdir(fh, next_)
                for _ in range(3):
                    (name, _, next_) = next(gen)
                    entries.append(name)
        except StopIteration:
            pass

        self.assertListEqual(sorted(names + ['lost+found']) ,
                             sorted(entries))
        self.server.releasedir(fh)

        self.fsck()

    def test_release(self):
        name = self.newname()

        # Test that entries are deleted when they're no longer referenced
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, 'foobar')
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        # Inode survives while the file handle is still open
        self.assertTrue(self.server.getattr(inode.id).id)
        self.server.release(fh)
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_removexattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.removexattr, inode.id, 'some name')
        self.server.setxattr(inode.id, 'key1', 'blub')
        self.server.removexattr(inode.id, 'key1')
        self.assertListEqual([], self.server.listxattr(inode.id))

        self.fsck()

    def test_rename(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)

        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id == name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        # Both parent directories must have updated timestamps
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.fsck()

    def test_replace_file(self):
        oldname = self.newname()
        newname = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, oldname,
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, 'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, 'test_xattr', '42*8')

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        (fh, inode2) = self.server.create(inode_p_new.id, newname,
                                          self.file_mode(), Ctx())
        self.server.write(fh, 0, 'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, 'test_xattr', '42*8')

        time.sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        # The replaced target inode must have been deleted
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))

        self.fsck()

    def test_replace_dir(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())

        time.sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        # The replaced (empty) directory inode must have been deleted
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))

        self.fsck()

    def test_setattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0641, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()

        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2 ** 32)
        attr.st_gid = randint(0, 2 ** 32)
        attr.st_rdev = randint(0, 2 ** 32)
        attr.st_atime = time.timezone + randint(0, 2 ** 32) / 10 ** 6
        attr.st_mtime = time.timezone + randint(0, 2 ** 32) / 10 ** 6

        time.sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)

        # Every attribute that was set must have been stored verbatim
        for key in attr.__slots__:
            if getattr(attr, key) is not None:
                self.assertEquals(getattr(attr, key), getattr(inode_new, key))

    def test_truncate(self):
        len_ = int(2.7 * self.blocksize)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, data)

        # Shrink..
        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        # ..then grow again; the new tail must read back as zeros
        attr.st_size = len_
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_)
                        == data[:len_ // 2] + b'\0' * (len_ // 2))
        self.server.release(fh)

        self.fsck()

    def test_truncate_0(self):
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        self.server.inodes.flush()

        # Truncate to zero, then write new data through a fresh handle
        fh = self.server.open(inode.id, os.O_RDWR)
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)

        self.fsck()

    def test_setxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')

        self.fsck()

    def test_statfs(self):
        # Test with zero contents
        self.server.statfs()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)
        self.server.statfs()

        # Test with data in file
        fh = self.server.open(inode.id, None)
        self.server.write(fh, 0, 'foobar')
        self.server.release(fh)
        self.server.statfs()

    def test_symlink(self):
        target = self.newname()
        name = self.newname()

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertEqual(target, self.server.readlink(inode.id))

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

    def test_unlink(self):
        name = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
        self.server.write(fh, 0, 'some data to deal with')
        self.server.release(fh)

        # Add extended attributes
        self.server.setxattr(inode.id, 'test_xattr', '42*8')

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_rmdir(self):
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_relink(self):
        name = self.newname()
        name2 = self.newname()
        data = 'some data to deal with'

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
        self.server.write(fh, 0, data)
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        # Inode still alive through the open file handle..
        self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
        # ..so it can be linked back into the tree
        self.server.link(inode.id, ROOT_INODE, name2)
        self.server.release(fh)

        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)

        self.fsck()

    def test_write(self):
        len_ = self.blocksize
        data = self.random_data(len_)
        off = self.blocksize // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        time.sleep(CLOCK_GRANULARITY)

        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)

        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)

        # Overwriting existing data must not change the file size
        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)

        self.server.release(fh)

        self.fsck()

    def test_edit(self):
        len_ = self.blocksize
        data = self.random_data(len_)

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)

        # Force data out of the cache so editing has to re-fetch the block
        self.block_cache.clear()

        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)

        self.fsck()

    def test_copy_tree(self):
        src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, 'file1',
                                            self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, 'file2',
                                            self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id, 'file2_hardlink')

        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, 'dir1', self.dir_mode(), Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, 'dir2', self.dir_mode(), Ctx())

        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id, 'file2_hardlink')

        # Replicate
        self.server.copy_tree(src_inode.id, dst_inode.id)

        # Change files (after the copy; the copy must be unaffected)
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'new file1 contents')
        self.server.release(fh)

        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'new file2 contents')
        self.server.release(fh)

        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, 'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, 'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, 'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, 'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, 'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, 'file2_hardlink')

        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)

        # Check file2
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertTrue(self.server.read(fh, 0, 42) == 'file2 contents')
        self.server.release(fh)
        # All three hardlinks must map to one copied inode
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)

        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)

        self.fsck()

    def test_lock_tree(self):
        inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, 'file1',
                                           self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, 'file2',
                                           self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)

        # Another file (outside the locked tree)
        (fh, inode3) = self.server.create(ROOT_INODE, 'file1',
                                          self.file_mode(), Ctx())
        self.server.release(fh)

        # Lock
        self.server.lock_tree(inode1.id)

        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)

        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, 'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, 'file1', inode1.id, 'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, 'file1', ROOT_INODE, 'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Open (write access must be denied, read access still works)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))

        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, 'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)

        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, 'dir1', self.dir_mode(), Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Setattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setattr(inode2a.id, dict())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, 'name', 'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, 'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        self.fsck()

    def test_remove_tree(self):
        inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, 'file1',
                                           self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, 'file2',
                                           self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)

        # Remove
        self.server.remove_tree(ROOT_INODE, 'source')

        # Neither the directory entries nor the inodes may survive
        for (id_p, name) in ((ROOT_INODE, 'source'),
                             (inode1.id, 'file1'),
                             (inode1.id, 'dir1'),
                             (inode2.id, 'file2')):
            self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                             'WHERE name=? AND parent_inode = ?', (name, id_p)))

        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))

        self.fsck()
class fsck_tests(unittest.TestCase):
    '''Unit tests for Fsck.

    Each test sets up a fresh (empty) file system, corrupts it in one
    specific way, and then uses assert_fsck() to verify that the
    corresponding Fsck check_* method both detects and repairs the
    damage.

    Fix applied in review: ``assertEquals`` (deprecated alias) replaced
    by ``assertEqual`` for consistency with the rest of this file.
    '''

    def setUp(self):
        # Fresh local backend and cache directory for every test
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         { 'max_obj_size': self.max_obj_size }, self.db)
        # Tests corrupt the fs on purpose, so errors are expected
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''
        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        # After repair, a full check must come up clean
        self.fsck.found_errors = False
        self.fsck.check()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        '''Dirty cache files must be committed to the backend by check_cache.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                               | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
                               1, 8))
        self._link('test-entry', inode)

        # Create new block
        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        # assertEquals -> assertEqual (deprecated alias, inconsistent with file)
        self.assertEqual(self.backend['s3ql_data_1'], 'somedata')

        # Existing block
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 8, inode))
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
            fh.write('somedat2')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedat3')
        self.assert_fsck(self.fsck.check_cache)

    def test_lof1(self):
        '''check_lof must recreate lost+found if it is not a directory.'''
        # Make lost+found a file
        inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                                (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        def check():
            self.fsck.check_lof()
            self.fsck.check_inodes_refcount()

        self.assert_fsck(check)

    def test_lof2(self):
        '''check_lof must recreate lost+found if it is missing entirely.'''
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                  (b'lost+found',))
        inode = self.db.get_val('SELECT inode FROM contents WHERE name_id=? AND '
                                'parent_inode=?', (name_id, ROOT_INODE))
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode,))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))
        self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?', (name_id,))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):
        '''Inode with two links but refcount 1 must be detected.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_orphaned_inode(self):
        '''Inode without any directory entry must be detected.'''
        self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                      "VALUES (?,?,?,?,?,?,?,?)",
                      (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                       0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_name_refcount(self):
        '''Name entry with a too-high refcount must be detected.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 2, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?', ('name1',))
        self.assert_fsck(self.fsck.check_names_refcount)

    def test_orphaned_name(self):
        '''Name entry that no directory entry references must be detected.'''
        self._add_name('zupbrazl')
        self.assert_fsck(self.fsck.check_names_refcount)

    def test_contents_inode(self):
        '''Directory entry pointing to a non-existing inode must be detected.'''
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foobar'), 124, ROOT_INODE))
        self.assert_fsck(self.fsck.check_contents_inode)

    def test_contents_inode_p(self):
        '''Directory entry with a non-existing parent inode must be detected.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foobar'), inode, 123))
        self.assert_fsck(self.fsck.check_contents_parent_inode)

    def test_contents_name(self):
        '''Directory entry referencing a non-existing name id must be detected.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (42, inode, ROOT_INODE))
        self.assert_fsck(self.fsck.check_contents_name)

    def _add_name(self, name):
        '''Get id for *name* and increase refcount

        Name is inserted in table if it does not yet exist.
        '''
        try:
            name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (name,))
        except NoSuchRowError:
            name_id = self.db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
                                    (name, 1))
        else:
            self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
        return name_id

    def _link(self, name, inode, parent_inode=ROOT_INODE):
        '''Link /*name* to *inode*'''
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name(name), inode, parent_inode))

    def test_inodes_size(self):
        '''Mismatches between inode size and its block map must be detected.'''
        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self._link('test-entry', id_)

        obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 512))
        self.backend['s3ql_data_%d' % obj_id] = 'foo'

        # Case 1: size says one block, but block number 1 exists
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 1, block_id))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 2: single block, but size does not match block size
        self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 0, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 3: extra block beyond declared size, shared block
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 1, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?',
                        (block_id,))
        self.assert_fsck(self.fsck.check_inodes_size)

    def test_objects_id(self):
        '''Objects present only in backend or only in db must be detected.'''
        # Create an object that only exists in the backend
        self.backend['s3ql_data_4364'] = 'Testdata'
        self.assert_fsck(self.fsck.check_objects_id)

        # Create an object that does not exist in the backend
        self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                        (34, 1, 27))
        self.assert_fsck(self.fsck.check_objects_id)

    def test_blocks_obj_id(self):
        '''Block referring to a non-existing object must be detected.'''
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, 48, 128))
        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (id_, 0, block_id))
        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_blocks_obj_id)

    def test_missing_obj(self):
        '''Object registered in db but missing from the backend must be detected.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 128))
        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (id_, 0, block_id))
        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_objects_id)

    def test_inode_blocks_inode(self):
        '''Block mapping for a non-existing inode must be detected.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
        self.backend['s3ql_data_%d' % obj_id] = 'foo'
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 34))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (27, 0, block_id))
        self.assert_fsck(self.fsck.check_inode_blocks_inode)

    def test_inode_blocks_block_id(self):
        '''Block mapping to a non-existing block must be detected.'''
        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (id_, 0, 35))
        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_inode_blocks_block_id)

    def test_symlinks_inode(self):
        '''Symlink target for a non-existing inode must be detected.'''
        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                        (42, b'somewhere else'))
        self.assert_fsck(self.fsck.check_symlinks_inode)

    def test_ext_attrs_inode(self):
        '''Extended attribute for a non-existing inode must be detected.'''
        self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
                        (self._add_name('some name'), 34, b'some value'))
        self.assert_fsck(self.fsck.check_ext_attributes_inode)

    def test_ext_attrs_name(self):
        '''Extended attribute referencing a non-existing name must be detected.'''
        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self._link('test-entry', id_)
        self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
                        (34, id_, b'some value'))
        self.assert_fsck(self.fsck.check_ext_attributes_name)

    @staticmethod
    def random_data(len_):
        '''Return *len_* bytes of random data from /dev/urandom.'''
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def test_loops(self):
        '''A cycle in the directory graph must be detected.'''
        # Create some directory inodes
        inodes = [ self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                                 "VALUES (?,?,?,?,?,?,?)",
                                 (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                                  0, 0, time.time(), time.time(), time.time(), 1))
                   for dummy in range(3) ]

        # Close the chain back to the first inode, producing a loop
        inodes.append(inodes[0])
        last = inodes[0]
        for inode in inodes[1:]:
            self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                            (self._add_name(bytes(inode)), inode, last))
            last = inode

        self.assert_fsck(self.fsck.check_loops)

    def test_obj_refcounts(self):
        '''Object refcount lower than its actual block references must be detected.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
        block_id_1 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                   (1, obj_id, 0))
        block_id_2 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                   (1, obj_id, 0))
        self.backend['s3ql_data_%d' % obj_id] = 'foo'

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1, 2048))
        self._link('test-entry', inode)
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id_1))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 2, block_id_2))
        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_orphaned_obj(self):
        '''Object without any referencing block must be detected.'''
        self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 33)')
        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_wrong_block_refcount(self):
        '''Block refcount lower than its actual inode references must be detected.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 23)')
        self.backend['s3ql_data_%d' % obj_id] = 'foo'
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1, self.max_obj_size))
        self._link('test-entry', inode)
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 0, block_id))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id))
        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_orphaned_block(self):
        '''Block without any referencing inode must be detected.'''
        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 24)')
        self.backend['s3ql_data_%d' % obj_id] = 'foo'
        self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                      (1, obj_id, 3))
        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_unix_size(self):
        '''check_unix must flag a FIFO with non-zero size.'''
        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(),
                         time.time(), time.time(), time.time(), 1, 0))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_size_symlink(self):
        '''check_unix must flag a symlink whose size differs from its target length.'''
        inode = 42
        target = 'some funny random string'
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(),
                         time.time(), time.time(), time.time(), 1, len(target)))
        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                        (inode, target))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_target(self):
        '''check_unix must flag a symlink target on a non-symlink inode.'''
        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(),
                         time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                        (inode, 'foo'))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_nomode_reg(self):
        '''Inode without a file-type bit must be turned into a regular file.'''
        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time.time()
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        self._link('test-entry', inode)
        self.assert_fsck(self.fsck.check_unix)

        # Permission bits must survive; type must have been set to S_IFREG
        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFREG)

    def test_unix_nomode_dir(self):
        '''Inode without a file-type bit that has children must become a directory.'''
        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time.time()
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        inode2 = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                               "VALUES (?,?,?,?,?,?,?)",
                               (perms | stat.S_IFREG, os.getuid(), os.getgid(),
                                stamp, stamp, stamp, 1))
        self._link('test-entry', inode)
        self._link('subentry', inode2, inode)
        self.assert_fsck(self.fsck.check_unix)

        # Permission bits must survive; type must have been set to S_IFDIR
        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFDIR)

    def test_unix_symlink_no_target(self):
        '''check_unix must flag a symlink inode without a target entry.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_rdev(self):
        '''check_unix must flag an rdev value on a non-device inode.'''
        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
                        "VALUES (?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(),
                         time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_child(self):
        '''check_unix must flag directory entries below a non-directory inode.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foo'), ROOT_INODE, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_blocks(self):
        '''check_unix must flag data blocks attached to a special (socket) inode.'''
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)