def testVeryLargeCaches(self):
    """Cache files larger than 4 GiB work and survive a reopen.

    Exercises sizes that overflow 32-bit offsets: a record stored in a
    ~4 GiB cache must still load after reopening at ~8 GiB.
    """
    cache = ZEO.cache.ClientCache('cache', size=(1 << 32) + (1 << 20))
    cache.store(n1, n2, None, b"x")
    cache.close()
    # Reopen with a larger size; the stored record must still be loadable.
    cache = ZEO.cache.ClientCache('cache', size=(1 << 33) + (1 << 20))
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual instead.
    self.assertEqual(cache.load(n1), (b'x', n2))
    cache.close()
def testVeryLargeCaches(self):
    """Cache files larger than 4 GiB work and survive a reopen.

    Exercises sizes that overflow 32-bit offsets: a record stored in a
    ~4 GiB cache must still load after reopening at ~8 GiB.

    NOTE(review): this was a Python-2 variant storing/comparing ``str``;
    cache data is bytes on Python 3, so the literals are now ``bytes``
    (matching the sibling definition of this test), and the deprecated
    ``assertEquals`` alias (removed in Python 3.12) is replaced.
    """
    cache = ZEO.cache.ClientCache('cache', size=(1 << 32) + (1 << 20))
    cache.store(n1, n2, None, b"x")
    cache.close()
    # Reopen with a larger size; the stored record must still be loadable.
    cache = ZEO.cache.ClientCache('cache', size=(1 << 33) + (1 << 20))
    self.assertEqual(cache.load(n1), (b'x', n2))
    cache.close()
def testConversionOfLargeFreeBlocks(self):
    """An oversized free block is capped at max_block_size on open.

    Hand-crafts a ~4 GiB cache file whose body is one huge free ('f')
    record, then verifies that opening the cache rewrites the free
    block's size down to ``ZEO.cache.max_block_size``.
    """
    # Build the file directly: magic, 8 zero bytes (last-tid slot),
    # then a free record header claiming the rest of a 4 GiB file.
    with open('cache', 'wb') as f:
        f.write(ZEO.cache.magic +
                b'\0' * 8 +
                b'f' + struct.pack(">I", (1 << 32) - 12))
        f.seek((1 << 32) - 1)
        f.write(b'x')
    # Open twice: the first open performs the conversion; the second
    # confirms the converted file is stable.
    cache = ZEO.cache.ClientCache('cache', size=1 << 32)
    cache.close()
    cache = ZEO.cache.ClientCache('cache', size=1 << 32)
    cache.close()
    with open('cache', 'rb') as f:
        f.seek(12)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(f.read(1), b'f')
        self.assertEqual(struct.unpack(">I", f.read(4))[0],
                         ZEO.cache.max_block_size)
def testConversionOfLargeFreeBlocks(self):
    """An oversized free block is capped at max_block_size on open.

    Hand-crafts a ~4 GiB cache file whose body is one huge free ('f')
    record, then verifies that opening the cache rewrites the free
    block's size down to ``ZEO.cache.max_block_size``.

    NOTE(review): duplicate of the sibling definition of this test;
    the later definition shadows the earlier one — confirm which copy
    should be kept.
    """
    # Build the file directly: magic, 8 zero bytes (last-tid slot),
    # then a free record header claiming the rest of a 4 GiB file.
    with open('cache', 'wb') as f:
        f.write(ZEO.cache.magic +
                b'\0' * 8 +
                b'f' + struct.pack(">I", (1 << 32) - 12))
        f.seek((1 << 32) - 1)
        f.write(b'x')
    # Open twice: the first open performs the conversion; the second
    # confirms the converted file is stable.
    cache = ZEO.cache.ClientCache('cache', size=1 << 32)
    cache.close()
    cache = ZEO.cache.ClientCache('cache', size=1 << 32)
    cache.close()
    with open('cache', 'rb') as f:
        f.seek(12)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(f.read(1), b'f')
        self.assertEqual(struct.unpack(">I", f.read(4))[0],
                         ZEO.cache.max_block_size)
def testChangingCacheSize(self):
    """Reopening a cache with a different size shrinks or grows it.

    Stores 100 records, shrinks the cache to 50 records (checking that
    the most recent entries survive), then grows it to 150 records and
    stores more, verifying length, on-disk size, and surviving OIDs at
    every step.  ``assertEquals`` (deprecated alias, removed in
    Python 3.12) is replaced by ``assertEqual``.
    """
    # start with a small cache
    data = b'x'
    recsize = ZEO.cache.allocated_record_overhead + len(data)
    # Try paddings both just past, and almost a whole record past,
    # an exact multiple of the record size.
    for extra in (2, recsize - 2):
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)
        for i in range(100):
            cache.store(p64(i), n1, None, data)
        self.assertEqual(len(cache), 100)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)

        # Now make it smaller
        cache.close()
        small = 50
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), small)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         set(range(small)))
        for i in range(100, 110):
            cache.store(p64(i), n1, None, data)

        # We use small-1 below because an extra object gets
        # evicted because of the optimization to assure that we
        # always get a free block after a new allocated block.
        expected_len = small - 1
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) + list(range(100, 110)))
        self.assertEqual(
            set(u64(oid) for (oid, tid) in cache.contents()),
            expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Now make it bigger
        cache.close()
        large = 150
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        for i in range(200, 305):
            cache.store(p64(i), n1, None, data)

        # We use large-2 for the same reason we used small-1 above.
        expected_len = large - 2
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) +
                            list(range(106, 110)) +
                            list(range(200, 305)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Cleanup
        cache.close()
        os.remove('cache')
def testChangingCacheSize(self):
    """Reopening a cache with a different size shrinks or grows it.

    Stores 100 records, shrinks the cache to 50 records (checking that
    the most recent entries survive), then grows it to 150 records and
    stores more, verifying length, on-disk size, and surviving OIDs at
    every step.

    NOTE(review): this was a Python-2 variant.  Fixed for Python 3:
    ``data`` is now ``bytes`` (cache records are bytes);
    ``range(...) + range(...)`` raised TypeError on Python 3 (range
    objects do not support ``+``) and is now list concatenation; the
    deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced by ``assertEqual``.  This duplicates the sibling
    definition of this test — confirm which copy should be kept.
    """
    # start with a small cache
    data = b'x'
    recsize = ZEO.cache.allocated_record_overhead + len(data)
    # Try paddings both just past, and almost a whole record past,
    # an exact multiple of the record size.
    for extra in (2, recsize - 2):
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)
        for i in range(100):
            cache.store(p64(i), n1, None, data)
        self.assertEqual(len(cache), 100)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + 100*recsize + extra)

        # Now make it smaller
        cache.close()
        small = 50
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), small)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         set(range(small)))
        for i in range(100, 110):
            cache.store(p64(i), n1, None, data)

        # We use small-1 below because an extra object gets
        # evicted because of the optimization to assure that we
        # always get a free block after a new allocated block.
        expected_len = small - 1
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) + list(range(100, 110)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + small*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Now make it bigger
        cache.close()
        large = 150
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(os.path.getsize('cache'),
                         ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        for i in range(200, 305):
            cache.store(p64(i), n1, None, data)

        # We use large-2 for the same reason we used small-1 above.
        expected_len = large - 2
        self.assertEqual(len(cache), expected_len)
        expected_oids = set(list(range(11, 50)) +
                            list(range(106, 110)) +
                            list(range(200, 305)))
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Make sure we can reopen with same size
        cache.close()
        cache = ZEO.cache.ClientCache(
            'cache',
            size=ZEO.cache.ZEC_HEADER_SIZE + large*recsize + extra)
        self.assertEqual(len(cache), expected_len)
        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                         expected_oids)

        # Cleanup
        cache.close()
        os.remove('cache')