def testVeryLargeCaches(self):
    """A record stored in a >4GB cache must survive reopening the file
    at an even larger (>8GB) size.

    Exercises 64-bit file offsets across the 1<<32 boundary.
    """
    cache = ZEO.cache.ClientCache('cache', size=(1 << 32) + (1 << 20))
    cache.store(n1, n2, None, b"x")
    cache.close()
    # Reopen past the 8GiB boundary; the stored record must still load.
    cache = ZEO.cache.ClientCache('cache', size=(1 << 33) + (1 << 20))
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(cache.load(n1), (b'x', n2))
    cache.close()
def testVeryLargeCaches(self):
    """A record stored in a >4GB cache must survive reopening the file
    at an even larger (>8GB) size.

    Exercises 64-bit file offsets across the 1<<32 boundary.
    """
    cache = ZEO.cache.ClientCache('cache', size=(1 << 32) + (1 << 20))
    # The cache stores bytes; a native str payload fails on Python 3.
    cache.store(n1, n2, None, b"x")
    cache.close()
    cache = ZEO.cache.ClientCache('cache', size=(1 << 33) + (1 << 20))
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(cache.load(n1), (b'x', n2))
    cache.close()
def testOldObjectLargerThanCache(self):
    """Storing a non-current record larger than the whole cache must be
    silently ignored and leave no non-current bookkeeping behind."""
    if self.cache.path:
        os.remove(self.cache.path)
    cache = ZEO.cache.ClientCache(size=50)
    # We store an object that is a bit larger than the cache can handle.
    # Payloads are bytes; n3 as end_tid makes this a non-current record.
    cache.store(n1, n2, n3, b"x" * 64)
    # We can see that it was not stored.
    self.assertEqual(None, cache.load(n1))
    # If an object cannot be stored in the cache, it must not be
    # recorded as non-current.  (assert_ is deprecated; use assertTrue.)
    self.assertTrue(1 not in cache.noncurrent)
def testOldObjectLargerThanCache(self):
    """Storing a non-current record larger than the whole cache must be
    silently ignored and leave no non-current bookkeeping behind."""
    if self.cache.path:
        os.remove(self.cache.path)
    cache = ZEO.cache.ClientCache(size=50)
    # We store an object that is a bit larger than the cache can handle.
    # Payloads are bytes; n3 as end_tid makes this a non-current record.
    cache.store(n1, n2, n3, b"x" * 64)
    # We can see that it was not stored.
    self.assertEqual(None, cache.load(n1))
    # If an object cannot be stored in the cache, it must not be
    # recorded as non-current.  (assert_ is deprecated; use assertTrue.)
    self.assertTrue(1 not in cache.noncurrent)
def test_clear_zeo_cache(self):
    """clear() must drop every record, current and non-current alike."""
    cache = self.cache
    # Seed each of ten oids with one current and one superseded revision.
    for idx in range(10):
        oid = p64(idx)
        payload = str(idx).encode()
        cache.store(oid, n2, None, payload)
        cache.store(oid, n1, n2, payload + b'old')
    self.assertEqual(len(cache), 20)
    self.assertEqual(cache.load(n3), (b'3', n2))
    self.assertEqual(cache.loadBefore(n3, n2), (b'3old', n1, n2))
    # After clearing, the cache is empty and every lookup misses.
    cache.clear()
    self.assertEqual(len(cache), 0)
    self.assertEqual(cache.load(n3), None)
    self.assertEqual(cache.loadBefore(n3, n2), None)
def test_clear_zeo_cache(self):
    """clear() must drop every record, current and non-current alike."""
    cache = self.cache
    for i in range(10):
        # The cache stores bytes; encode the str payloads so this works
        # on Python 3 (native str was a Python-2-only leftover).
        cache.store(p64(i), n2, None, str(i).encode())
        cache.store(p64(i), n1, n2, str(i).encode() + b'old')
    self.assertEqual(len(cache), 20)
    self.assertEqual(cache.load(n3), (b'3', n2))
    self.assertEqual(cache.loadBefore(n3, n2), (b'3old', n1, n2))
    cache.clear()
    # Everything is gone: empty length, misses on both lookup paths.
    self.assertEqual(len(cache), 0)
    self.assertEqual(cache.load(n3), None)
    self.assertEqual(cache.loadBefore(n3, n2), None)
def test_loadBefore_doesnt_miss_current(self):
    """loadBefore must fall back to the current record when there is no
    non-current data covering the requested tid."""
    cache = self.cache
    oid = n1
    before = cache.loadBefore
    cache.store(oid, n1, None, b'first')
    # No data strictly before n1; at n2 the current record qualifies,
    # with an open (None) end tid.
    self.assertEqual(before(oid, n1), None)
    self.assertEqual(before(oid, n2), (b'first', n1, None))
    # Supersede the first revision with a second, closing its interval.
    self.cache.invalidate(oid, n2)
    cache.store(oid, n2, None, b'second')
    self.assertEqual(before(oid, n1), None)
    self.assertEqual(before(oid, n2), (b'first', n1, n2))
    self.assertEqual(before(oid, n3), (b'second', n2, None))
def testCurrentObjectLargerThanCache(self):
    """Storing a current record larger than the whole cache must be
    silently ignored and leave no 'current' bookkeeping behind."""
    if self.cache.path:
        os.remove(self.cache.path)
    cache = ZEO.cache.ClientCache(size=50)
    # We store an object that is a bit larger than the cache can handle.
    # BUG FIX: store() takes (oid, start_tid, end_tid, data) — the extra
    # '' argument was a leftover of the removed "version" parameter and
    # made this a TypeError.  Payloads are bytes.
    cache.store(n1, n2, None, b"x" * 64)
    # We can see that it was not stored.  Check the freshly created
    # local cache, not the fixture's self.cache.
    self.assertEqual(None, cache.load(n1))
    # If an object cannot be stored in the cache, it must not be
    # recorded as current.  (assert_ is deprecated; use assertTrue.)
    self.assertTrue(n1 not in cache.current)
    # Regression test: invalidation must still work.  invalidate() takes
    # (oid, tid); the '' version argument was likewise removed.
    cache.invalidate(n1, n2)
def testEviction(self):
    """Filling the cache past its size must evict older records."""
    # Manually override the current maxsize
    cache = ZEO.cache.ClientCache(None, 3395)

    # Trivial test of eviction code.  Doesn't test non-current
    # eviction.
    data = [b"z" * i for i in range(100)]
    for i in range(50):
        n = p64(i)
        cache.store(n, n, None, data[i])
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(cache), i + 1)
    # The cache is now almost full.  The next insert
    # should delete some objects.
    n = p64(50)
    cache.store(n, n, None, data[51])
    # assert_ is a deprecated alias; use assertTrue.
    self.assertTrue(len(cache) < 51)
def testEviction(self):
    """Filling the cache past its size must evict older records."""
    # Manually override the current maxsize
    cache = ZEO.cache.ClientCache(None, 3395)

    # Trivial test of eviction code.  Doesn't test non-current
    # eviction.  Payloads are bytes (native str was Python-2-only).
    data = [b"z" * i for i in range(100)]
    for i in range(50):
        n = p64(i)
        cache.store(n, n, None, data[i])
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(cache), i + 1)
    # The cache is now almost full.  The next insert
    # should delete some objects.
    n = p64(50)
    cache.store(n, n, None, data[51])
    # assert_ is a deprecated alias; use assertTrue.
    self.assertTrue(len(cache) < 51)
def testChangingCacheSize(self):
    """Reopening a persistent cache file at a different size must shrink
    or grow the file in place, keeping as many records as fit."""
    # start with a small cache
    data = b'x'
    recsize = ZEO.cache.allocated_record_overhead + len(data)

    # Try two slack amounts so resizing is exercised with different
    # leftover-space shapes.
    for extra in (2, recsize - 2):
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)
        for i in range(100):
            cache.store(p64(i), n1, None, data)
        # assertEquals is a deprecated alias; use assertEqual throughout.
        self.assertEqual(len(cache), 100)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)

        # Now make it smaller
        cache.close()
        small = 50
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), small)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         set(range(small)))
        for i in range(100, 110):
            cache.store(p64(i), n1, None, data)

        # We use small-1 below because an extra object gets
        # evicted because of the optimization to assure that we
        # always get a free block after a new allocated block.
        expected_len = small - 1
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) + list(range(100, 110)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Now make it bigger
        cache.close()
        large = 150
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        for i in range(200, 305):
            cache.store(p64(i), n1, None, data)

        # We use large-2 for the same reason we used small-1 above.
        expected_len = large - 2
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) +
                            list(range(106, 110)) +
                            list(range(200, 305)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Cleanup
        cache.close()
        os.remove('cache')
def testChangingCacheSize(self):
    """Reopening a persistent cache file at a different size must shrink
    or grow the file in place, keeping as many records as fit."""
    # start with a small cache.  The cache stores bytes; the native-str
    # payload was a Python-2-only leftover.
    data = b'x'
    recsize = ZEO.cache.allocated_record_overhead + len(data)

    # Try two slack amounts so resizing is exercised with different
    # leftover-space shapes.
    for extra in (2, recsize - 2):
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)
        for i in range(100):
            cache.store(p64(i), n1, None, data)
        # assertEquals is a deprecated alias; use assertEqual throughout.
        self.assertEqual(len(cache), 100)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)

        # Now make it smaller
        cache.close()
        small = 50
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), small)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         set(range(small)))
        for i in range(100, 110):
            cache.store(p64(i), n1, None, data)

        # We use small-1 below because an extra object gets
        # evicted because of the optimization to assure that we
        # always get a free block after a new allocated block.
        expected_len = small - 1
        self.assertEqual(len(cache), expected_len)
        # range objects cannot be concatenated on Python 3; convert to
        # lists first (range(...) + range(...) was Python-2-only).
        expected_oids = set(list(range(11, 50)) + list(range(100, 110)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Now make it bigger
        cache.close()
        large = 150
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        for i in range(200, 305):
            cache.store(p64(i), n1, None, data)

        # We use large-2 for the same reason we used small-1 above.
        expected_len = large - 2
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) +
                            list(range(106, 110)) +
                            list(range(200, 305)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache', size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Cleanup
        cache.close()
        os.remove('cache')