def test_copy(self):
    """A copy must preserve both the item order and the key set."""
    source = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    original = LruCache(source)
    duplicate = original.copy()
    self.assertEqual(duplicate.items(), original.items())
    self.assertEqual(duplicate.keys(), original.keys())
    # Copying an empty cache yields an equivalent empty cache.
    self.assertEqual(LruCache().items(), LruCache().copy().items())
    self.assertEqual(LruCache().keys(), LruCache().copy().keys())
class XidLruCache:
    """LRU-buffered reader for xid (SLRU-style) segment files.

    Pages of ``blcksz`` bytes are stored ``PAGES_PER_SEGMENT`` to a file;
    pages read from disk are kept in an LruCache so repeated lookups
    avoid disk I/O.
    """

    PAGES_PER_SEGMENT = 32  # pages stored in one on-disk segment file

    def __init__(self, datadir, pageclass, buffsz):
        """datadir: cluster data directory; pageclass: wrapper class for
        cached pages; buffsz: number of pages the buffer may hold."""
        self.datadir = datadir
        ctrlfile = ControlFile(datadir)
        self.blcksz = ctrlfile.blcksz  # page size taken from pg_control
        self.buffer = LruCache(pageclass, self.blcksz, buffsz)

    def getfilename(self, segno):
        """Map a segment number to its file path (left to subclasses)."""
        pass

    def readfromdisk(self, pageno):
        """Read one page from its segment file and return the raw block.

        Raises UPgException when the file cannot be read or the block
        comes back truncated.
        """
        blocksz = self.blcksz
        # Integer division: pages are grouped PAGES_PER_SEGMENT per file
        # ('/' would produce a float under Python 3).
        segno = pageno // self.PAGES_PER_SEGMENT
        filename = self.getfilename(segno)
        try:
            # Binary mode: the block is raw page data, not text.
            with open(filename, 'rb') as file:
                # BUG FIX: seek to the page's offset *within* its segment,
                # not blocksz * absolute page number (cf. HeapBuffer, which
                # seeks blocksz * (blocknum % segsz)).
                file.seek(blocksz * (pageno % self.PAGES_PER_SEGMENT))
                block = file.read(blocksz)
        except IOError:
            # Narrowed from a bare except so programming errors propagate.
            raise UPgException('error in reading xid')
        if len(block) != blocksz:
            raise UPgException('error in reading xid')
        return block

    def getlrupage(self, pageno):
        """Return the cached page for pageno, loading it from disk on a miss."""
        lrupage = self.buffer.get_and_visit(pageno)
        if not lrupage:
            block = self.readfromdisk(pageno)
            lrupage = self.buffer.put(pageno, block)
        return lrupage
def test_len(self):
    """len() tracks insertion, deletion, and clear()."""
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    cache = LruCache(items)
    self.assertEqual(len(cache), len(items))
    del cache['a']
    self.assertEqual(len(cache), len(items) - 1)
    cache.clear()
    self.assertEqual(len(cache), 0)
def test_eq(self):
    """Equality depends on type, contents, and item order."""
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    cache = LruCache(items)
    # Reflexivity and equality of identically built caches.
    self.assertTrue(cache == cache)
    self.assertTrue(LruCache() == LruCache())
    self.assertTrue(LruCache(items) == LruCache(items))
    # Different type, subset, or reversed order -> not equal.
    self.assertFalse(LruCache() == list())
    self.assertFalse(LruCache(items) == LruCache(items[1:]))
    self.assertFalse(LruCache(items) == LruCache(items[::-1]))
def test_getitem(self):
    """__getitem__ raises KeyError for missing keys, returns stored values."""
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    cache = LruCache(items)
    with self.assertRaises(KeyError):
        LruCache()['key']
    with self.assertRaises(KeyError):
        cache['key']
    for key, expected in items:
        self.assertEqual(cache[key], expected)
def test_lock(self, RLockMock):
    """The lock is exercised only when an expiration policy is active."""
    lock = RLockMock()
    cache = LruCache()
    # Without expiration the cache never enters/exits the lock.
    lock.__enter__.assert_not_called()
    lock.__exit__.assert_not_called()
    # With expiration the lock protects internal state.
    cache = LruCache(expires=10)
    lock.__enter__.assert_called()
    lock.__exit__.assert_called()
def test_clear(self):
    """clear() empties the cache, is idempotent, and leaves it usable."""
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    cache = LruCache(items)
    self.assertEqual(len(cache), len(items))
    cache.clear()
    self.assertEqual(len(cache), 0)
    # Clearing an already-empty cache is a no-op.
    cache.clear()
    self.assertEqual(len(cache), 0)
    # The cache can be refilled and cleared again.
    cache.update(items)
    self.assertEqual(len(cache), len(items))
    cache.clear()
    self.assertEqual(len(cache), 0)
def __init__(self, pgdatadir, catalog_class):
    """Cache layout parameters from pg_control and create the page buffer.

    pgdatadir: cluster data directory; catalog_class: catalog access
    class providing version/tablespace constants.
    """
    self.pgdatadir = pgdatadir
    self.catalog_class = catalog_class
    control = ControlFile(pgdatadir)
    self.blocksz = control.blcksz
    self.segfilesz = control.relseg_size  # blocks per segment file
    self.catalog_version_no = control.catalog_version_no
    self.pg_majorversion = catalog_class.loadpgversion(pgdatadir)
    self.heapbuffer = LruCache(HeapBufferPage, self.blocksz,
                               HEAP_BUFFER_COUNT)
def test_set_item(self):
    """Inserting beyond capacity evicts the least recently used entry."""
    cache = LruCache(3)
    for position, key in enumerate(('k1', 'k2', 'k3', 'k4'), start=1):
        cache[key] = position * 100
    self.assertEqual(len(cache), 3)
    # 'k1' was evicted; survivors keep insertion order.
    self.assertListEqual(list(cache.items()),
                         [('k2', 200), ('k3', 300), ('k4', 400)])
def test_init(self):
    """The constructor validates capacity and accepts pairs, dicts, kwargs."""
    with self.assertRaises(ValueError):
        LruCache(capacity=0)
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    self.assertEqual(sorted(LruCache(items).items()), items)
    self.assertEqual(sorted(LruCache(dict(items)).items()), items)
    self.assertEqual(sorted(LruCache(**dict(items)).items()), items)
    self.assertEqual(sorted(LruCache(items, e=4, f=5, r=6).items()),
                     items + [('e', 4), ('f', 5), ('r', 6)])
    # Re-running __init__ extends the cache rather than resetting it.
    cache = LruCache(items)
    cache.__init__([('e', 5), ('t', 6)])
    self.assertEqual(sorted(cache.items()), items + [('e', 5), ('t', 6)])
def test_update_item(self):
    """Re-assigning a key updates its value and moves it to the MRU slot."""
    cache = LruCache(3)
    cache['k1'] = 100
    cache['k2'] = 200
    cache['k1'] = 300
    self.assertEqual(len(cache), 2)
    self.assertListEqual(list(cache.items()),
                         [('k2', 200), ('k1', 300)])
def do_size(self, line):
    """size n - sets the maximum size of the cache to n objects"""
    if self.cache is not None:
        # The cache may only be sized once.
        print("ERROR")
        return
    args = line.split()
    if len(args) != 1:
        print("Syntax Error. Expected 1 arguments, got %d" % len(args))
        return False
    try:
        capacity = int(args[0])
    except ValueError:
        # BUG FIX: a non-numeric argument used to raise out of the REPL.
        print("ERROR")
        return False
    self.cache = LruCache(capacity)
    print("SIZE OK")
def test_delete(self, create_mock, CleanManagerMock, RLockMock):
    """Deleting an expiring entry notifies the clean manager under the lock."""
    manager = CleanManagerMock()
    lock = RLockMock()
    node = _ExpNode(key='a', value='b', expires=10)
    create_mock.return_value = node
    cache = LruCache()
    cache.add('a', 'b', expires=10)
    del cache['a']
    # The node travels through the clean manager on add and delete.
    manager.add.assert_called_with(node)
    manager.on_delete.assert_called()
    lock.__enter__.assert_called()
    lock.__exit__.assert_called()
def test_get_item(self):
    """A stored value is retrievable by its key."""
    cache = LruCache(3)
    cache['k1'] = 100
    self.assertEqual(cache['k1'], 100)
def test_setitem(self):
    """__setitem__ rejects None, keeps MRU-first order, re-adds deleted keys."""
    # None is invalid as key, as value, or as both.
    for key, value in (('a', None), (None, 'a'), (None, None)):
        with self.assertRaises(ValueError):
            LruCache()[key] = value
    cache = LruCache(capacity=10)
    cache['a'] = 1
    cache['b'] = 2
    self.assertEqual(cache.items(), [('b', 2), ('a', 1)])
    # Re-assigning moves the key to the front.
    cache['a'] = 3
    self.assertEqual(cache.items(), [('a', 3), ('b', 2)])
    cache['b'] = 4
    self.assertEqual(cache.items(), [('b', 4), ('a', 3)])
    cache['c'] = 5
    self.assertEqual(cache.items(), [('c', 5), ('b', 4), ('a', 3)])
    # Deleting then re-inserting restores the entry at the MRU position.
    del cache['c']
    cache['c'] = 5
    self.assertEqual(cache.items(), [('c', 5), ('b', 4), ('a', 3)])
def lru_cache(maxsize=128, expires=10*60):
    """A memoized function, backed by an LRU cache. Supports data expiration.

    maxsize: maximum number of cached results; the least recently used
    entry is evicted when the cache is full.
    expires: lifetime of a cached entry, in seconds.

    BUG FIX (docstring only): the original doctest decorated ``function``
    but invoked an undefined ``f``, used Python-2 print statements, and
    slept 3s against a 10s expiry. The example below is consistent:

    >>> import time
    >>> @lru_cache(maxsize=2, expires=2)
    ... def function(x):
    ...     print("function(" + str(x) + ")")
    ...     return x
    >>> function(3)
    function(3)
    3
    >>> function(3)   # still cached: the body does not run again
    3
    >>> time.sleep(3) # long enough for the entry to expire
    >>> function(3)   # expired, so the body runs again
    function(3)
    3
    >>> function(4)
    function(4)
    4
    >>> function(5)   # maxsize=2: inserting 5 evicts the LRU entry (3)
    function(5)
    5
    """
    # One cache is shared by everything this factory call decorates; the
    # key includes the function itself, so entries never collide.
    cache = LruCache(capacity=maxsize, expires=expires)

    def _lru(function):
        @wraps(function)
        def _lru_wrapper(*args, **kwargs):
            key = _get_key(function, args, kwargs)
            # NOTE(review): membership test then lookup — an entry could
            # expire between the two calls; verify LruCache tolerates this.
            if key in cache:
                return cache[key]
            result = function(*args, **kwargs)
            cache[key] = result
            return result
        return _lru_wrapper
    return _lru
def test_contains(self):
    """`in` reflects current membership, including keys added later."""
    self.assertNotIn('a', LruCache())
    self.assertNotIn('a', LruCache([('b', 2), ('c', 3), ('d', 4)]))
    self.assertIn('b', LruCache([('b', 2), ('c', 3), ('d', 4)]))
    self.assertNotIn('b', LruCache())
    self.assertNotIn('b', LruCache([('c', 3), ('d', 4)]))
    cache = LruCache([('b', 2), ('c', 3), ('d', 4)])
    self.assertNotIn('a', cache)
    cache['a'] = 3
    self.assertIn('a', cache)
def lru_cache(maxsize=128, expires=10 * 60):
    """Decorator factory memoizing a function in an expiring LRU cache.

    maxsize bounds the number of cached results; expires is the entry
    lifetime in seconds.
    """
    cache = LruCache(capacity=maxsize, expires=expires)

    def decorate(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            key = _get_key(function, args, kwargs)
            if key in cache:
                # Cache hit: skip the wrapped function entirely.
                return cache[key]
            value = function(*args, **kwargs)
            cache[key] = value
            return value
        return wrapper

    return decorate
def test_delitem(self):
    """Deletion raises on missing keys and works head-first and tail-first."""
    with self.assertRaises(KeyError):
        del LruCache()['key']
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    cache = LruCache(items)
    with self.assertRaises(KeyError):
        del cache['key']
    # Delete in insertion order; the remainder stays MRU-first.
    for position, (key, _) in enumerate(items):
        del cache[key]
        self.assertEqual(cache.items(), items[position + 1:][::-1])
    # Refill, then delete starting from the tail.
    cache.update(items)
    for position, (key, _) in enumerate(reversed(items)):
        del cache[key]
        remaining = len(items) - position - 1
        self.assertEqual(cache.items(), items[:remaining][::-1])
class LruRepl(cmd.Cmd):
    """LruRepl: The LruCache shell (Read Eval Print loop)"""

    intro = ("Welcome to LruCache shell.\n\nType help or ? to list commands."
             "\nCtrl-d or exit to exit.\n")
    prompt = "lrucache> "
    cache = None  # created lazily by the SIZE command

    def precmd(self, line):
        """accept command as upper or lower case"""
        return line.lower()

    def default(self, line):
        """overrides the default error message."""
        # Unified on print() — the original mixed Python-2 print
        # statements with print() calls in the same class.
        print('ERROR')
        return False

    def command_preproc(self, line, expected_len):
        """Validate that the cache exists and the line carries exactly
        expected_len arguments; return the argument list or None."""
        if self.cache is None:  # BUG FIX: identity test, not == None
            print("Cache not initialized. Use SIZE command first.")
            return None
        args = line.split()
        if len(args) != expected_len:
            print("Syntax Error. Expected %d arguments, got %d" % (
                expected_len, len(args)))
            return None
        return args

    def do_size(self, line):
        """size n - sets the maximum size of the cache to n objects"""
        if self.cache is not None:
            # The cache may only be sized once.
            print("ERROR")
            return
        args = line.split()
        if len(args) != 1:
            print("Syntax Error. Expected 1 arguments, got %d" % len(args))
            return False
        self.cache = LruCache(int(args[0]))
        print("SIZE OK")

    def do_set(self, line):
        """set key value - puts the key/value pair into the cache if the key
        does not exist or overwrites the value of an existing pair
        """
        args = self.command_preproc(line, 2)
        if args is not None:
            self.cache.put(args[0], args[1])
            print("SET OK")

    def do_get(self, line):
        """get key -- prints the value associated with the key if found"""
        args = self.command_preproc(line, 1)
        if args is not None:
            value = self.cache.fetch(args[0])
            if value is None:
                print("NOTFOUND")
            else:
                print("GOT %s" % value)

    def do_stats(self, line):
        """stats - print the cache stats"""
        if self.cache is None:
            print("Cache not initialized. Use SIZE command first.")
            return False
        print("Cache max size: %d" % self.cache.max_size)
        print("Current size: %d" % self.cache.current_size)

    def do_dump(self, line):
        """dump - dump the cache"""
        if self.cache is None:
            print("Cache not initialized. Use SIZE command first.")
            return False
        self.cache.data.dump()

    def do_eof(self, line):
        """Ctrl-D to exit"""
        print('\n')
        return True

    def do_exit(self, line):
        """exits shell"""
        print("\n")
        return True
class HeapBuffer:
    """Buffered reader for PostgreSQL heap relation files.

    Pages are addressed by (relfilenode, fork, block) and cached in an
    LRU buffer so repeated reads avoid disk I/O.
    """

    def __init__(self, pgdatadir, catalog_class):
        """Read layout parameters from pg_control and create the page cache."""
        self.pgdatadir = pgdatadir
        self.catalog_class = catalog_class
        ctrlfile = ControlFile(pgdatadir)
        self.blocksz = ctrlfile.blcksz
        self.segfilesz = ctrlfile.relseg_size  # blocks per segment file
        self.catalog_version_no = ctrlfile.catalog_version_no
        self.pg_majorversion = catalog_class.loadpgversion(pgdatadir)
        self.heapbuffer = LruCache(HeapBufferPage, self.blocksz,
                                   HEAP_BUFFER_COUNT)

    def __getbuftag(self, relfilenode, forknum, blocknum):
        """Compute the buffer tag identifying one page in the cache."""
        return struct.pack('5I', relfilenode.space_node, relfilenode.db_node,
                           relfilenode.rel_node, forknum, blocknum)

    def getrelationpath(self, relfilenode, forknum):
        """Return the file path for a relation fork.

        The layout differs between the global tablespace, the default
        tablespace, and user-defined tablespaces.
        """
        assert forknum == MAIN_FORKNUM or forknum == FSM_FORKNUM \
            or forknum == VISIBILITYMAP_FORKNUM or forknum == INIT_FORKNUM
        pgdatadir = self.pgdatadir
        forkNames = ('', 'fsm', 'vm', 'init')
        if forknum == MAIN_FORKNUM:
            filename = relfilenode.rel_node
        else:
            filename = '%u_%s' % (relfilenode.rel_node, forkNames[forknum])
        if relfilenode.space_node == self.catalog_class.GLOBALTABLESPACE_OID:
            return '%s/global/%s' % (pgdatadir, filename)
        elif relfilenode.space_node == self.catalog_class.DEFAULTTABLESPACE_OID:
            return '%s/base/%u/%s' % (pgdatadir, relfilenode.db_node, filename)
        else:
            tablespacedir = "PG_%s_%u" % (self.pg_majorversion,
                                          self.catalog_version_no)
            return '%s/pg_tblspc/%u/%s/%u/%s' % (
                pgdatadir, relfilenode.space_node, tablespacedir,
                relfilenode.db_node, filename)

    def __loadbuffer(self, relfilenode, forknum, blocknum):
        """Read one block from disk; raise UPgException on any failure."""
        blocksz = self.blocksz
        segsz = self.segfilesz
        filepath = self.getrelationpath(relfilenode, forknum)
        # Integer division: relations are split into segments of segsz
        # blocks; segments after the first carry a ".N" suffix.
        segno = blocknum // segsz
        if segno > 0:
            filepath = '%s.%u' % (filepath, segno)
        blockoff = blocksz * (blocknum % segsz)
        try:
            # Binary mode: the block is raw page data, not text.
            with open(filepath, 'rb') as file:
                file.seek(blockoff)
                block = file.read(blocksz)
        except IOError:
            # BUG FIX: the original message ended with "%m" (a syslog-ism);
            # %-formatting raises ValueError on the unknown conversion.
            logger.error('could not read block %u in file "%s"'
                         % (blocknum, filepath))
            raise UPgException('could not read block in file')
        if len(block) != blocksz:
            # Short read: the block is truncated or lies past EOF.
            logger.error('could not read block %u in file "%s"'
                         % (blocknum, filepath))
            raise UPgException('could not read block in file')
        return block

    def readpage(self, relfilenode, forknum, blocknum):
        """Return the cached page for the block, loading it on a miss."""
        tag = self.__getbuftag(relfilenode, forknum, blocknum)
        buffpage = self.heapbuffer.get_and_visit(tag)
        if not buffpage:
            buffdata = self.__loadbuffer(relfilenode, forknum, blocknum)
            buffpage = self.heapbuffer.put(tag, buffdata)
        return buffpage

    def getblocknums(self, relfilenode, forknum):
        """Count the blocks of a relation fork by walking its segment files."""
        blocksz = self.blocksz
        segsz = self.segfilesz
        basepath = self.getrelationpath(relfilenode, forknum)
        segno = 0
        while True:
            # BUG FIX: derive each segment path from the base path; the
            # original appended ".N" to the previous iteration's path,
            # producing e.g. "base.1.2" for segment 2.
            if segno > 0:
                filepath = '%s.%u' % (basepath, segno)
            else:
                filepath = basepath
            if not os.path.exists(filepath):
                return segno * segsz
            filesize = os.path.getsize(filepath) // blocksz
            if filesize > segsz:
                raise UPgException('could not read block in file')
            elif filesize < segsz:
                # A partially filled segment is the last one.
                return segno * segsz + filesize
            segno += 1
class HeapBuffer:
    """Buffered reader for PostgreSQL heap relation files.

    Pages are addressed by (relfilenode, fork, block) and cached in an
    LRU buffer so repeated reads avoid disk I/O.
    """

    def __init__(self, pgdatadir, catalog_class):
        """Read layout parameters from pg_control and create the page cache."""
        self.pgdatadir = pgdatadir
        self.catalog_class = catalog_class
        ctrlfile = ControlFile(pgdatadir)
        self.blocksz = ctrlfile.blcksz
        self.segfilesz = ctrlfile.relseg_size  # blocks per segment file
        self.catalog_version_no = ctrlfile.catalog_version_no
        self.pg_majorversion = catalog_class.loadpgversion(pgdatadir)
        self.heapbuffer = LruCache(HeapBufferPage, self.blocksz,
                                   HEAP_BUFFER_COUNT)

    def __getbuftag(self, relfilenode, forknum, blocknum):
        """Compute the buffer tag identifying one page in the cache."""
        return struct.pack('5I', relfilenode.space_node, relfilenode.db_node,
                           relfilenode.rel_node, forknum, blocknum)

    def getrelationpath(self, relfilenode, forknum):
        """Return the file path for a relation fork.

        The layout differs between the global tablespace, the default
        tablespace, and user-defined tablespaces.
        """
        assert forknum == MAIN_FORKNUM or forknum == FSM_FORKNUM \
            or forknum == VISIBILITYMAP_FORKNUM or forknum == INIT_FORKNUM
        pgdatadir = self.pgdatadir
        forkNames = ('', 'fsm', 'vm', 'init')
        if forknum == MAIN_FORKNUM:
            filename = relfilenode.rel_node
        else:
            filename = '%u_%s' % (relfilenode.rel_node, forkNames[forknum])
        if relfilenode.space_node == self.catalog_class.GLOBALTABLESPACE_OID:
            return '%s/global/%s' % (pgdatadir, filename)
        elif relfilenode.space_node == self.catalog_class.DEFAULTTABLESPACE_OID:
            return '%s/base/%u/%s' % (pgdatadir, relfilenode.db_node, filename)
        else:
            tablespacedir = "PG_%s_%u" % (self.pg_majorversion,
                                          self.catalog_version_no)
            return '%s/pg_tblspc/%u/%s/%u/%s' % (
                pgdatadir, relfilenode.space_node, tablespacedir,
                relfilenode.db_node, filename)

    def __loadbuffer(self, relfilenode, forknum, blocknum):
        """Read one block from disk; raise UPgException on any failure."""
        blocksz = self.blocksz
        segsz = self.segfilesz
        filepath = self.getrelationpath(relfilenode, forknum)
        # Integer division: relations are split into segments of segsz
        # blocks; segments after the first carry a ".N" suffix.
        segno = blocknum // segsz
        if segno > 0:
            filepath = '%s.%u' % (filepath, segno)
        blockoff = blocksz * (blocknum % segsz)
        try:
            # Binary mode: the block is raw page data, not text.
            with open(filepath, 'rb') as file:
                file.seek(blockoff)
                block = file.read(blocksz)
        except IOError:
            # BUG FIX: the original message ended with "%m" (a syslog-ism);
            # %-formatting raises ValueError on the unknown conversion.
            logger.error('could not read block %u in file "%s"'
                         % (blocknum, filepath))
            raise UPgException('could not read block in file')
        if len(block) != blocksz:
            # Short read: the block is truncated or lies past EOF.
            logger.error('could not read block %u in file "%s"'
                         % (blocknum, filepath))
            raise UPgException('could not read block in file')
        return block

    def readpage(self, relfilenode, forknum, blocknum):
        """Return the cached page for the block, loading it on a miss."""
        tag = self.__getbuftag(relfilenode, forknum, blocknum)
        buffpage = self.heapbuffer.get_and_visit(tag)
        if not buffpage:
            buffdata = self.__loadbuffer(relfilenode, forknum, blocknum)
            buffpage = self.heapbuffer.put(tag, buffdata)
        return buffpage

    def getblocknums(self, relfilenode, forknum):
        """Count the blocks of a relation fork by walking its segment files."""
        blocksz = self.blocksz
        segsz = self.segfilesz
        basepath = self.getrelationpath(relfilenode, forknum)
        segno = 0
        while True:
            # BUG FIX: derive each segment path from the base path; the
            # original appended ".N" to the previous iteration's path,
            # producing e.g. "base.1.2" for segment 2.
            if segno > 0:
                filepath = '%s.%u' % (basepath, segno)
            else:
                filepath = basepath
            if not os.path.exists(filepath):
                return segno * segsz
            filesize = os.path.getsize(filepath) // blocksz
            if filesize > segsz:
                raise UPgException('could not read block in file')
            elif filesize < segsz:
                # A partially filled segment is the last one.
                return segno * segsz + filesize
            segno += 1
def __init__(self, datadir, pageclass, buffsz):
    """Remember the data directory and build the page buffer.

    datadir: cluster data directory; pageclass: class used to wrap
    cached pages; buffsz: number of pages the LRU buffer may hold.
    """
    self.datadir = datadir
    control = ControlFile(datadir)
    self.blcksz = control.blcksz  # page size from pg_control
    self.buffer = LruCache(pageclass, self.blcksz, buffsz)
def test_get_non_existed_item(self):
    """Looking up a key that was never stored raises KeyError."""
    cache = LruCache(3)
    with self.assertRaises(KeyError):
        cache['k1']
def test_iter(self):
    """Iteration yields values in MRU-first (reverse insertion) order."""
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    expected = [value for _, value in reversed(items)]
    cache = LruCache(items)
    self.assertEqual(list(iter(cache)), expected)
def test_repr(self):
    """repr() matches the printable MRU-first form; empty prints as '{}'."""
    items = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    cache = LruCache(items)
    self.assertEqual(repr(cache), _get_printable(items[::-1]))
    self.assertEqual(repr(LruCache()), '{}')