class cache_tests(TestCase):
    """Unit tests for BlockCache.

    Each test runs against a fresh temporary local bucket, a fresh temporary
    cache directory and an in-file SQLite database.  A TestBucketPool wrapper
    is swapped in where a test needs to assert how many bucket writes/deletes
    a cache operation performs.
    """

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        # Restore the real pool before destroy() in case a test swapped in a
        # TestBucketPool with exhausted quotas.
        self.cache.bucket_pool = self.bucket_pool
        self.cache.destroy()
        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)
        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        """Return *len_* bytes of random data."""
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.blocksize))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            # Sleep so the access timestamps are distinguishable
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # Flush the 2 most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])

        # We want to expire 4 entries, 2 of which are already flushed
        self.cache.max_entries = 16
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=2)
        self.cache.expire()
        self.cache.bucket_pool.verify()
        self.assertEqual(len(self.cache.entries), 16)
        for i in range(20):
            if i in most_recent:
                self.assertNotIn((inode, i), self.cache.entries)
            else:
                self.assertIn((inode, i), self.cache.entries)

    def test_upload(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.max_size)
        blockno1 = 21
        blockno2 = 25
        blockno3 = 7

        data1 = self.random_data(datalen)
        data2 = self.random_data(datalen)
        data3 = self.random_data(datalen)

        # Case 1: create new object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
            el1 = fh
        self.cache.upload(el1)
        self.cache.bucket_pool.verify()

        # Case 2: Link new object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data1)
            el2 = fh
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 3: Upload old object, still has references
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el1)
        self.cache.bucket_pool.verify()

        # Case 4: Upload old object, no references left
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1, no_write=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data3)
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 5: Link old object, no references left
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 6: Link old object, still has references
        # (Need to create another object first)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno3) as fh:
            fh.seek(0)
            fh.write(data1)
            el3 = fh
        self.cache.upload(el3)
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(el1)
        self.cache.clear()
        self.cache.bucket_pool.verify()

    def test_remove_referenced(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.max_size)
        blockno1 = 21
        blockno2 = 24
        data = self.random_data(datalen)

        # Two blocks share one object: removing one block must not
        # delete the object from the bucket.
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data)
        self.cache.clear()
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        self.cache.remove(inode, blockno1)
        self.cache.bucket_pool.verify()

    def test_remove_cache(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 1: Elements only in cache
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.remove(inode, 1)
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), "")

    def test_remove_cache_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 2: Element in cache and db
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        commit(self.cache, inode)
        self.cache.bucket_pool.verify()
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.bucket_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), "")

    def test_remove_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 3: Element only in DB
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        self.cache.clear()
        self.cache.bucket_pool.verify()
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.bucket_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), "")