Example #1
 def __init__(self, cache_size, lookback_count):
     """
     Initializes this LRUWithLFUCache
     """
     LRUCache.__init__(self, cache_size)
     self._lookback_count = lookback_count
     self._cache_frequencies = {}
Example #2
 def cache_object(self, obj_id, size, xtime, next_line=None, force=True):
     # Don't reset the count unconditionally here; LRUCache calls
     # this method internally to move obj_id to the top of the stack,
     # so that would wipe the existing frequency:
     # self._cache_frequencies[obj_id] = 0
     if obj_id not in self._cache_frequencies:
         self._cache_frequencies[obj_id] = 0
     LRUCache.cache_object(self, obj_id, size, xtime)
Example #3
 def test_less_capacity(self):
     lru = LRUCache(2)
     self._put(lru)
     assert lru.get(1) == -1
     assert lru.get(3) == 40
     self._put(lru)
     assert lru.get(3) == 40
     assert lru.get(2) == 20
     assert lru.get(1) == -1
Example #4
 def test_of_size_1(self):
     lru = LRUCache(1)
     lru.set(2, 1)
     self.assertEqual(lru.get(2), 1)
     lru.set(3, 2)
     self.assertIsNone(lru.get(2))
     self.assertEqual(lru.get(3), 2)
Example #5
 def test_of_size_2(self):
     lru = LRUCache(2)
     lru.set(2, 1)
     lru.set(1, 1)
     self.assertEqual(lru.get(2), 1)
     lru.set(4, 1)
     self.assertIsNone(lru.get(1))
     self.assertEqual(lru.get(2), 1)
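
These tests assume a small LRUCache with capacity-based eviction, though the conventions vary between projects: some return -1 on a miss, others None, and the setter appears as put, set, or set with an extra timestamp argument. A minimal sketch consistent with the -1-on-miss variant (illustrative only, not any one project's actual class):

from collections import OrderedDict

class LRUCache:
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()  # keys ordered oldest -> newest

    def get(self, key):
        if key not in self.cache:
            return -1
        self.cache.move_to_end(key)  # a hit makes the key most recently used
        return self.cache[key]

    def put(self, key, value):
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        if len(self.cache) > self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used key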
Example #6
    def test_init(self):

        cache = LRUCache(4)
        self.assertEqual(cache.capacity, 4)
        self.assertEqual(cache.head, cache.tail)
        self.assertEqual(len(cache.key_to_prev), 0)
        self.assertTrue(isinstance(cache, LRUCache))
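
The assertions above imply a singly linked list with a sentinel node (head equals tail while the cache is empty) and a key_to_prev map from each key to the node preceding its node. A hypothetical __init__ that satisfies all four assertions (the project's actual node type is not shown):

class ListNode:
    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value
        self.next = None

class LRUCache:
    def __init__(self, capacity):
        self.capacity = capacity
        self.head = ListNode()  # sentinel node
        self.tail = self.head   # empty cache: head is tail
        self.key_to_prev = {}   # key -> node preceding the node that holds key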
Example #9
    def test_operation2(self):

        cache = LRUCache(1)
        cache.set(2, 1, 11)
        self.assertEqual(cache.get(2), 1)

        cache.set(3, 2, 11)
        self.assertEqual(cache.get(2), -1)
        self.assertEqual(cache.get(3), 2)
Example #10
 def test_least_recent(self):
     cache = LRUCache(2)
     cache.put(1, 1)
     cache.put(2, 2)
     cache.get(1)
     cache.put(2, 4)
     cache.put(3, 3)
     result = 1 not in cache.keys
     self.assertEqual(result, True,
                      'least recently used key should have been evicted')
Example #11
 def test_maximum_capacity(self):
     cache = LRUCache(2)
     cache.put(1, 1)
     cache.put(2, 2)
     cache.get(1)
     cache.put(2, 4)
     cache.put(3, 3)
     result = len(cache.cache)
     self.assertEqual(result, cache.capacity,
                      'cache size should not exceed capacity')
Example #12
    def test_operation1(self):

        cache = LRUCache(2)

        cache.set(2, 1, 11)
        cache.set(1, 1, 11)
        self.assertEqual(cache.get(2), 1)
        cache.set(4, 1, 11)
        self.assertEqual(cache.get(1), -1)
        self.assertEqual(cache.get(2), 1)
Example #13
 def test_delete(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     cache.put(2, 'Two')
     cache._del(1)
     result = len(cache.cache)
     self.assertEqual(result, cache.capacity - 1, 'exactly one entry should remain after delete')
Example #14
 def test_reset(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     cache.put(2, 'Two')
     cache.reset()
     result = len(cache.cache)
     self.assertEqual(result, 0, 'cache should be empty after reset')
Example #15
 def test_get(self):
     lru = LRUCache(3)
     self._put(lru)
     assert lru.get(1) == 10
     assert lru.get(2) == 20
     lru.put(1, 30)
     assert lru.get(1) == 30
Example #16
 def test_delete(self):
     lru = LRUCache(3)
     self._put(lru)
     lru.delete(1)
     lru.delete(2)
     assert lru.get(1) == -1
     assert lru.get(2) == -1
     assert lru.get(3) == 40
Example #17
 def setUp(self):
     print('In setUp()')
     self.fixture = LRUCache()
Example #18
 def test_get(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     result = cache.get(1)
     self.assertEqual(result, 'One', 'get should return the stored value')
Example #21
 def test_capacity(self):
     cache = LRUCache(2)
     result = cache.capacity
     self.assertEqual(result, 2, 'capacity should match the constructor argument')
Example #22
class CacheValuesTest(unittest.TestCase):
    def setUp(self):
        print('In setUp()')
        self.fixture = LRUCache()

    def tearDown(self):
        print('In tearDown()')
        del self.fixture

    def test_empty_cache(self):
        print('Test : Empty Cache')
        self.assertDictEqual(self.fixture.getStore(), {}, "Cache is empty")

    def test_cache_oneEntry(self):
        print('Test : Cache with single value')
        self.fixture.find(1)
        print(self.fixture.getStore())
        self.assertDictEqual(self.fixture.getStore(), {1: 1},
                             "Cache has a single entry")

    def test_cache_repeatedOneEntry(self):
        print('Test : Cache with single value repeatedly')
        for _ in range(16):
            self.fixture.find(1)

        print(self.fixture.getStore())
        self.assertDictEqual(self.fixture.getStore(), {1: 1},
                             "Cache has a single entry")

    def test_cache_fullSize(self):
        print('Test : Cache full size')
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(4)
        self.fixture.find(5)
        self.fixture.find(6)
        self.fixture.find(7)
        self.fixture.find(8)
        self.fixture.find(9)
        self.fixture.find(0)

        print(self.fixture.getStore())
        self.assertDictEqual(self.fixture.getStore(), {
            0: 0,
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            6: 6,
            7: 7,
            8: 8,
            9: 9
        }, "Cache has all entries")

    def test_cache_overflow(self):
        print('Test : Cache overflow')
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(4)
        self.fixture.find(5)
        self.fixture.find(6)
        self.fixture.find(7)
        self.fixture.find(8)
        self.fixture.find(9)
        self.fixture.find(0)
        self.fixture.find(16)
        self.fixture.find(27)
        self.fixture.find(38)
        self.fixture.find(49)
        self.fixture.find(50)

        print(self.fixture.getStore())
        self.assertDictEqual(self.fixture.getStore(), {
            0: 0,
            6: 6,
            7: 7,
            8: 8,
            9: 9,
            38: 38,
            16: 16,
            49: 49,
            50: 50,
            27: 27
        }, "Cache has all entries")

    def test_cache_random(self):
        print('Test : Cache random')
        self.fixture.find(1)
        self.fixture.find(22342)
        self.fixture.find("harry")
        self.fixture.find(-4689)
        self.fixture.find("&&&&")
        self.fixture.find(690040809480)
        self.fixture.find("0090")
        self.fixture.find(8)
        self.fixture.find(0)
        self.fixture.find(0)
        self.fixture.find("16")
        self.fixture.find(27568)
        self.fixture.find(38)
        self.fixture.find(49)
        self.fixture.find(50)

        print(self.fixture.getStore())
        self.assertDictEqual(
            self.fixture.getStore(), {
                0: 0,
                49: 49,
                38: 38,
                6272037681056615: '16',
                690040809480: 690040809480,
                -8147289704323569492: '&&&&',
                27568: 27568,
                8: 8,
                50: 50,
                -3495090546366936577: '0090'
            }, "Cache has all entries")

    def test_cache_alternateValues(self):
        print('Test : Cache alternating Values')
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(2)
        self.fixture.find(1)
        self.fixture.find(3)
        self.fixture.find(1)
        self.fixture.find(3)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(2)
        self.fixture.find(1)

        print(self.fixture.getStore())
        self.assertDictEqual(self.fixture.getStore(), {
            1: 1,
            2: 2,
            3: 3
        }, "Cache has all entries")

    def test_cache_alternateHitsandMisses(self):
        print('Test : Cache hits  and misses for repeating values')
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(4)
        self.fixture.find(5)
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(4)
        self.fixture.find(5)
        self.fixture.find(6)
        self.fixture.find(7)
        self.fixture.find(8)
        self.fixture.find(9)
        self.fixture.find(10)
        self.fixture.find(11)
        self.fixture.find(1)
        self.fixture.find(2)
        self.fixture.find(3)
        self.fixture.find(4)
        self.fixture.find(5)

        print(self.fixture.getStore())
        self.assertDictEqual(self.fixture.getStore(), {
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            7: 7,
            8: 8,
            9: 9,
            10: 10,
            11: 11
        }, "Cache has all entries")

    def test_cache_String(self):
        print('Test : Cache with String values')
        self.fixture.find("Harry")
        self.fixture.find("Archer")
        self.fixture.find("Dexter")
        self.fixture.find("Seinfield")
        self.fixture.find("Friends")
        self.fixture.find("Shield")
        self.fixture.find("Vikings")
        self.fixture.find("Grimm")
        self.fixture.find("Google")
        self.fixture.find("Yahoo")
        self.fixture.find("Saleforce")
        self.fixture.find("twitter")
        self.fixture.find("facebook")
        self.fixture.find("capio")
        self.fixture.find("Python")
        self.fixture.find("java")
        self.fixture.find("xml")
        self.fixture.find("html")
        self.fixture.find("random strings to test String Caching")

        print(self.fixture.getStore())
        self.assertDictEqual(
            self.fixture.getStore(), {
                -5999452984715080672: 'xml',
                -2338026935110240988: 'java',
                -1782981222247821945: 'random strings to test String Caching',
                -2359742753373747800: 'Python',
                -6598467229008937334: 'twitter',
                3021726190866786059: 'Saleforce',
                -8781860034246656723: 'Yahoo',
                7799575877465763251: 'html',
                3162525260849330260: 'facebook',
                -8502831814997350055: 'capio'
            }, "Cache has all entries")

    def test_cache_exception(self):
        print('Test : find() with an undefined name')
        # x is deliberately undefined, so find() should raise a NameError
        with self.assertRaises(NameError):
            self.fixture.find(x)
Example #23
class Collection(object):
	def __init__(self, name, database):


		# The maximum limit of each JSON file
		self.FILE_SIZE_LIMIT = 100

		# The write-in frequency
		self.WRITE_IN_FREQ = 0.1

		# The size for the query cache file
		self.QUERY_CACHE_SIZE_LIMIT = 30

		# The number of JSON files and the path for the current JSON file
		self.file_num, self.current_file = find_current_file(name, os.getcwd()+'/'+database.db_name)

		self.storage = JSONStorage(self.current_file)


		self.name = name
		self._db = database

		# Cache for the whole collection
		self._cache = {}

		# Cache for the latest file that is being used to write in
		self._current_cache = {}

		#Cache for the most frequently queried items
		# self._query_cache = LRUCache(self.QUERY_CACHE_SIZE_LIMIT)


		# Each entry has a unique id as the primary key

		existing_ids = self._read().keys()

		if existing_ids:
			self._last_id = max(i for i in existing_ids)
		else:
			self._last_id = 0

		self._load_query_cache()
		self.update_storage()

	def setting(self, file_size=None, query_cache_size=None, write_freq=None):
		if file_size is not None:
			self.FILE_SIZE_LIMIT = file_size

		if query_cache_size is not None:
			self.QUERY_CACHE_SIZE_LIMIT = query_cache_size

		if write_freq is not None:
			self.WRITE_IN_FREQ = write_freq

	def _load_query_cache(self):

		self.query_storage = JSONStorage(os.getcwd()+'/'+ self._db.db_name + '/' + self.name + '_query.json')
		self._query_cache = LRUCache(self.QUERY_CACHE_SIZE_LIMIT, self.query_storage.read(is_ordered_dict=True))

		# print self._query_cache.items.keys()


	def update_storage(self):
		if len(self._current_cache) >= self.FILE_SIZE_LIMIT:
		# If we enter this function because of the file size limit, we do the following:
			## write to the file
			## create the next file
			## point our current storage block to the next file
			self.storage.write(dict(self._current_cache))
			# print time.ctime(), 'size limit'
			self._current_cache = {}
			self.file_num += 1
			self.storage = JSONStorage(os.getcwd()+'/'+ self._db.db_name + '/' + self.name + '_' + str(self.file_num) + '.json')
		else:
		# Else, it is just because of the timer expiration, we do the regular write-in
			# print time.ctime(), 'regular write-in', len(self._current_cache)

			self.storage.write(dict(self._current_cache))
			self.query_storage.write(self._query_cache.items)
			# print self._query_cache.items

		# Set the timer again. Make it periodic.
		self.t = threading.Timer(self.WRITE_IN_FREQ, self.update_storage)
		self.t.start()

	def process_element(self, func, cond=None, eid=None):
		data = self._read()

		if eid is not None:
			for i in eid:
				func(data, i)

		else:
			for eid in list(data):
				if cond(data[eid]):
					func(data, eid)

		self._write(data)

	def _get_next_id(self):
		current = self._last_id + 1
		self._last_id = current

		return current

	def _read(self):

		if self._cache == {}:
		# When the current collection is first read, read it all at once to the cache.
			self._read_all()
		return self._cache


	def _read_all(self):
		for i in range(1, self.file_num+1):
			raw = JSONStorage(find_nth_file(self.name, os.getcwd()+'/'+self._db.db_name, i)).read()
			for key in list(raw):
				eid = int(key)
				self._cache[eid] = Element(raw[key], eid)
				if i == self.file_num:
					self._current_cache[eid] = Element(raw[key], eid)

	def _write(self, values):

		self.storage.write(values)

	def __len__(self):
		return len(self._read())

	def all(self):
		return list(self._read().values())

	def insert(self, value):

		eid = self._get_next_id()
		self._cache[eid] = value
		self._current_cache[eid] = value


		'''
		If the file in-use reaches the size limit, we do the following:
		1. cancel the timer
		2. enter the update_storage
		Otherwise, we can just wait until the timer expires
		'''
		if len(self._current_cache) >= self.FILE_SIZE_LIMIT:
			self.t.cancel()
			self.update_storage()

	def insert_multiple(self, values):
		for value in values:
			self.insert(value)

	def update(self, fields, cond=None, eids=None):

		'''
		Update can be performed by specifying eids and/or cond.
		If neither is given, every element is updated.
		'''

		if cond is None and eids is None:
			eids = self._cache.keys()

		if cond is not None:
			eids = self._select_to_be_updated_eid(cond, eids)

		pages_eid = self._group_eids(eids)

		self._update_cache(eids, pages_eid, fields)
		self._update_disk(pages_eid, fields)

	def _update_disk(self, pages_eid, fields, remove=False):
		# do the update for the files
		temp_caches = {}

		for page in pages_eid:
			temp_caches[page] = self._read_page(page)
			temp_caches[page] = self._write_in_page(fields, pages_eid[page], temp_caches[page], remove)

		for page in temp_caches:
			self._update_page(page, temp_caches[page])

	def _group_eids(self, eids):
		'''
		Group eids by the page (JSON file) they belong to.
		'''

		pages_eid = {}
		for i in eids:
			# e.g. with FILE_SIZE_LIMIT = 100, eids 1..100 map to page 1,
			# eids 101..200 to page 2, and so on
			page = int(math.ceil(float(i)/float(self.FILE_SIZE_LIMIT)))
			if page not in pages_eid:
				pages_eid[page] = [i]
			else:
				pages_eid[page].append(i)

		return pages_eid

	def _select_to_be_updated_eid(self, cond, eids):

		# Given a condition, return the list of ids that satisfy it
		filtered_ids = []

		if eids is None:
			# if eids are not specified, iterate over all possible eids
			for id in self._cache:
				if cond(self._cache[id]):
					filtered_ids.append(id)
		else:
			# check whether each given id satisfies the cond
			for id in eids:
				if cond(self._cache[id]):
					filtered_ids.append(id)
		return filtered_ids

	def _update_cache(self, eids, pages_eid, fields=None, remove=False):
		# update the cache first before write-in to the file

		for i in eids:
			if remove:
				self._cache.pop(i, None)
			else:
				self._cache[i].update(fields)
		for page in pages_eid:
			if page == self.file_num:
				for id in pages_eid[page]:
					if remove:
						self._current_cache.pop(id, None)
					else:
						self._current_cache[id].update(fields)


	def _read_page(self, page):

		'''
		Used for update()
		read every related page
		return a temp_cache
		'''

		page_content = JSONStorage(find_nth_file(self.name, os.getcwd()+'/'+self._db.db_name, page)).read()
		temp_cache = {}
		for key in list(page_content):
			id = int(key)
			temp_cache[id] = Element(page_content[key], id)
		return temp_cache

	def _write_in_page(self, fields, eids, temp_cache, remove=False):
		# write in the updated items
		for eid in eids:
			if remove:
				temp_cache.pop(eid, None)
			else:
				try:
					temp_cache[eid].update(fields)
				except KeyError:
					pass
		return temp_cache

	def _update_page(self, page, cache):

		'''
		Write in the temp_cache given by _read_page
		'''

		page_content = JSONStorage(find_nth_file(self.name, os.getcwd()+'/'+self._db.db_name, page))
		page_content.write(cache)

	def remove(self, cond=None, eids=None):

		if cond is None and eids is None:
			# this means removing all elements
			eids = self._cache.keys()

		if cond is not None:
			eids = self._select_to_be_updated_eid(cond, eids)

		pages_eid = self._group_eids(eids)

		self._update_cache(eids, pages_eid, fields=None, remove=True)

		self._update_disk(pages_eid, fields=None, remove=True)

	def search(self, cond, keys=None):

		elements = self._query_cache.get(str(cond))

		if elements == -1:
			# cache miss: scan the whole collection and cache the result
			elements = [e for e in self.all() if cond(e)]
			self._query_cache.set(str(cond), elements)
		if keys is not None:
			# project each element down to the requested keys
			return [{key: e[key] for key in keys} for e in elements]
		return elements

	def clear(self):
		self.remove()

	def get(self, cond=None, eid=None):

		if eid is not None:
			read_all = self._read()
			return read_all[eid]

		elements = self.search(cond)

		if elements == []:
			return {}
		return elements[0]

	def close(self):
		# when closing the database, remember to store those that are still in cache and cancel the write-in.
		self.t.cancel()
		self.update_storage()

		self._cache.clear()
		self._current_cache.clear()

		self.t.cancel()
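
A sketch of how this Collection might be driven (hypothetical usage; assumes a database object exposing db_name plus the project's JSONStorage, Element and find_current_file helpers; note that search() keys its query cache on str(cond), so conditions need a stable string form to get repeat cache hits):

col = Collection('users', db)
col.insert({'name': 'alice', 'age': 31})
col.update({'age': 32}, cond=lambda e: e['name'] == 'alice')
matches = col.search(lambda e: e['age'] > 20)
col.close()  # flush caches to disk and stop the write-in timer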
Example #24
class Node():
    def __init__(self, fellow, my_ip):
        self.addr = my_ip
        self.fellow = fellow
        self.lock = threading.Lock()
        self.DB = {}
        self.log = []
        self.staged = None
        self.term = 0
        self.status = FOLLOWER
        self.majority = ((len(self.fellow) + 1) // 2) + 1
        self.voteCount = 0
        self.commitIdx = 0
        self.timeout_thread = None
        self.init_timeout()
        self.capacity = 3
        self.cache = LRUCache(capacity=self.capacity)

    # increment only when we are a candidate and receive a positive vote
    # change status to LEADER and start heartbeat as soon as we reach majority
    def incrementVote(self):
        self.voteCount += 1
        if self.voteCount >= self.majority:
            print(f"{self.addr} becomes the leader of term {self.term}")
            self.status = LEADER
            self.startHeartBeat()

    # vote for myself, increase term, change status to candidate
    # reset the timeout and start sending request to followers
    def startElection(self):
        self.term += 1
        self.voteCount = 0
        self.status = CANDIDATE
        self.init_timeout()
        self.incrementVote()
        self.send_vote_req()

    # ------------------------------
    # ELECTION TIME CANDIDATE

    # spawn threads to request votes from all followers until we get replies
    def send_vote_req(self):
        # TODO: use map later for better performance
        # keep asking the addresses that haven't voted yet,
        # until everyone has voted
        # or I am the leader
        for voter in self.fellow:
            threading.Thread(target=self.ask_for_vote,
                             args=(voter, self.term)).start()

    # request a vote from another server during the given election term
    def ask_for_vote(self, voter, term):

        # need to include self.commitIdx, only up-to-date candidate could win
        #debugger (self.addr+'+'+voter)
        channel = grpc.insecure_channel(voter)
        stub = mykvserver_pb2_grpc.KVServerStub(channel)

        message = mykvserver_pb2.VoteMessage()
        #debugger (stub.VoteRequest(message),2)

        message.term = term
        message.commitIdx = self.commitIdx
        if self.staged:
            message.staged.act = self.staged['act']
            message.staged.key = self.staged['key']
            message.staged.value = self.staged['value']

        while self.status == CANDIDATE and self.term == term:

            reply = stub.VoteRequest(message)
            if reply:
                #choice = reply.json()["choice"]
                choice = reply.choice
                #print(f"RECEIVED VOTE {choice} from {voter}")
                if choice and self.status == CANDIDATE:
                    self.incrementVote()
                elif not choice:
                    # they declined because either I'm out-of-date or not newest term
                    # update my term and terminate the vote_req
                    #term = reply.json()["term"]
                    term = int(reply.term)
                    if term > self.term:
                        self.term = term
                        self.status = FOLLOWER
                    # fix out-of-date needed
                break

    # ------------------------------
    # ELECTION TIME FOLLOWER

    # some other server is asking
    def decide_vote(self, term, commitIdx, staged):
        # new election
        # decline all non-up-to-date candidate's vote request as well
        # but update term all the time, not reset timeout during decision
        # also vote for someone that has our staged version or a more updated one
        if self.term < term and self.commitIdx <= commitIdx and (staged or
                                                                 (self.staged
                                                                  == staged)):
            self.reset_timeout()
            self.term = term
            return True, self.term
        else:
            return False, self.term

    # ------------------------------
    # START LEADER

    def startHeartBeat(self):
        #print("Starting HEARTBEAT")
        if self.staged:
            # we have something staged at the beginning of our leadership;
            # we treat it as a newly received payload and spread it around
            self.handle_put(self.staged)

        for each in self.fellow:
            t = threading.Thread(target=self.send_heartbeat, args=(each, ))
            t.start()

    def update_follower_commitIdx(self, follower):
        channel = grpc.insecure_channel(follower)
        stub = mykvserver_pb2_grpc.KVServerStub(channel)
        message = mykvserver_pb2.HBMessage()
        message.term = self.term
        message.addr = self.addr
        message.action = 'commit'
        message.payload.act = self.log[-1]['act']
        message.payload.key = self.log[-1]['key']
        message.commitIdx = self.commitIdx
        if self.log[-1]['value']:
            message.payload.value = self.log[-1]['value']
        reply = stub.HeartBeat(message)

    def send_heartbeat(self, follower):
        # check if the new follower has the same commit index; otherwise tell it to catch up to our log level
        if self.log:
            self.update_follower_commitIdx(follower)

        while self.status == LEADER:
            start = time.time()
            channel = grpc.insecure_channel(follower)
            stub = mykvserver_pb2_grpc.KVServerStub(channel)

            ping = mykvserver_pb2.JoinRequest()
            #print(ping)
            if ping:
                if follower not in self.fellow:
                    self.fellow.append(follower)
                message = mykvserver_pb2.HBMessage()
                message.term = self.term
                message.addr = self.addr
                reply = stub.HeartBeat(message)
                if reply:
                    self.heartbeat_reply_handler(reply.term, reply.commitIdx)
                delta = time.time() - start
                # keep the heartbeat constant even if the network speed is varying
                time.sleep((cfg.HB_TIME - delta) / 1000)
            else:
                for index in range(len(self.fellow)):
                    if self.fellow[index] == follower:
                        self.fellow.pop(index)
                        print('Server {} lost connect'.format(follower))
                        break

    # we may step down when get replied
    def heartbeat_reply_handler(self, term, commitIdx):
        # i thought i was leader, but a follower told me
        # that there is a new term, so i now step down
        if term > self.term:
            self.term = term
            self.status = FOLLOWER
            self.init_timeout()

        # TODO logging replies

    # ------------------------------
    # FOLLOWER STUFF

    def reset_timeout(self):
        self.election_time = time.time() + utils.random_timeout()

    # /heartbeat

    def heartbeat_follower(self, msg):
        # weird case if 2 nodes are LEADER of the same term:
        # both receive a heartbeat
        # and we will both step down

        term = msg["term"]
        if self.term <= term:
            self.leader = msg["addr"]
            self.reset_timeout()
            # in case I am not follower
            # or started an election and lost it
            if self.status == CANDIDATE:
                self.status = FOLLOWER
            elif self.status == LEADER:
                self.status = FOLLOWER
                self.init_timeout()
            # i have missed a few messages
            if self.term < term:
                self.term = term

            # handle client request
            if "action" in msg:
                print("received action", msg)
                action = msg["action"]
                # logging after first msg
                if action == "log":
                    payload = msg["payload"]
                    self.staged = payload
                    #print(self.staged)
                # proceeding staged transaction
                elif self.commitIdx <= msg["commitIdx"]:
                    #print('update staged')
                    if not self.staged:
                        self.staged = msg["payload"]
                    self.commit()

        return self.term, self.commitIdx

    # initiate timeout thread, or reset it
    def init_timeout(self):
        self.reset_timeout()
        # safety guarantee, timeout thread may expire after election
        if self.timeout_thread and self.timeout_thread.is_alive():
            return
        self.timeout_thread = threading.Thread(target=self.timeout_loop)
        self.timeout_thread.start()

    # the timeout function
    def timeout_loop(self):
        # only stop timeout thread when winning the election
        while self.status != LEADER:
            delta = self.election_time - time.time()
            if delta < 0:
                self.startElection()
            else:
                time.sleep(delta)

    def handle_get(self, payload):
        print('handle_getting ', payload)
        key = payload["key"]
        act = payload["act"]
        if act == 'get':
            print("getting", payload)
            cache_res = self.cache.get(key)
            if cache_res is not None:
                print('result in cache')
                payload["value"] = cache_res
                return payload
            elif key in self.DB:
                print('result in db')
                payload["value"] = self.DB[key]
                return payload
        '''
        elif act == 'del':
            print('deleting',payload)
            if key in self.DB:
                self.DB[key] = None
                return payload
        '''
        return None

    # takes a message and an array of confirmations and spreads it to the followers
    # if it is a commit, it releases the lock
    def spread_update(self, message, confirmations=None, lock=None):
        for i, each in enumerate(self.fellow):

            channel = grpc.insecure_channel(each)
            stub = mykvserver_pb2_grpc.KVServerStub(channel)
            m = mykvserver_pb2.HBMessage()
            m.term = message['term']
            m.addr = message['addr']
            if message['payload'] is not None:
                #print(message['payload'])
                m.payload.act = message['payload']['act']
                m.payload.key = message['payload']['key']
                m.payload.value = message['payload']['value']
            #m.action = 'commit'
            if message['action']:
                m.action = message['action']
            m.commitIdx = self.commitIdx
            r = stub.HeartBeat(m)
            if r and confirmations:
                # print(f" - - {message['action']} by {each}")
                confirmations[i] = True
        if lock:
            lock.release()

    def handle_put(self, payload):
        #print("putting", payload)
        # lock to only handle one request at a time
        self.lock.acquire()
        self.staged = payload
        waited = 0
        log_message = {
            "term": self.term,
            "addr": self.addr,
            "payload": payload,
            "action": "log",
            "commitIdx": self.commitIdx
        }

        # spread the log to everyone
        log_confirmations = [False] * len(self.fellow)
        threading.Thread(target=self.spread_update,
                         args=(log_message, log_confirmations)).start()
        while sum(log_confirmations) + 1 < self.majority:
            waited += 0.0005
            time.sleep(0.0005)
            if waited > cfg.MAX_LOG_WAIT / 1000:
                print(f"waited {cfg.MAX_LOG_WAIT} ms, update rejected:")
                self.lock.release()
                return False
        # reach this point only if a majority has replied and tell everyone to commit
        commit_message = {
            "term": self.term,
            "addr": self.addr,
            "payload": payload,
            "action": "commit",
            "commitIdx": self.commitIdx
        }
        #print('commit to all')
        can_delete = self.commit()
        threading.Thread(target=self.spread_update,
                         args=(commit_message, None, self.lock)).start()
        print(
            "majority reached, replied to client, sending message to commit, message:",
            commit_message)
        return can_delete

    # put staged key-value pair into local database
    def commit(self):
        self.commitIdx += 1
        self.log.append(self.staged)
        key = self.staged["key"]
        act = self.staged["act"]
        value = None
        can_delete = True
        cache_update = False
        #if self.staged['value'] == 'None':
        #    self.DB[key]= None
        #    key = None
        #    can_delete = False
        if act == 'put':
            #print('it\'s a put transaction')
            value = self.staged["value"]
            self.DB[key] = value
            cache_update = True
        elif act == 'del':
            #print('it\' s a delete transaction')
            if self.DB[key]:
                self.DB[key] = None
            else:
                can_delete = False
            cache_update = True
        if cache_update:
            self.cache.set(key, value)
            self.cache.getallkeys()
        # put newly inserted key-value pair into local cache

        # empty the staged so we can vote accordingly if there is a tie
        self.staged = None
        return can_delete
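
Node.init_timeout relies on utils.random_timeout(), which is not shown here. Raft-style consensus randomizes the election timeout so that followers rarely time out simultaneously and split the vote; a plausible sketch of that helper (an assumption, not the project's actual utils module):

import random

def random_timeout(low=0.150, high=0.300):
    # classic Raft picks an election timeout uniformly in roughly 150-300 ms
    return random.uniform(low, high)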
Example #25
 def test_put(self):
     lru = LRUCache(3)
     self._put(lru)
     lru.put(1, 54)
     assert lru.get(1) == 54
     assert lru.get(2) == 20
Example #27
 def test_update(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     cache.put(1, 'Two')
     result = cache.get(1)
     self.assertEqual(result, 'Two', 'put with an existing key should update the value')
Example #28
 def test_size(self):
     lru = LRUCache(1)
     assert lru.capacity == 1
Example #29
 def test_reset(self):
     lru = LRUCache(3)
     self._put(lru)
     lru.reset()
     assert lru.get(1) == -1
     assert lru.get(2) == -1
Example #30
 def get_cached(self, obj_id, xtime, next_line=None):
     if obj_id in self._cache_frequencies:
         self._cache_frequencies[obj_id] += 1
     else:
         self._cache_frequencies[obj_id] = 1
     return LRUCache.get_cached(self, obj_id, xtime)
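
Examples #1, #2 and #30 are three methods of the same LRUWithLFUCache subclass: __init__ creates the per-object hit counter, cache_object seeds a count of 0 only for genuinely new objects (the guard matters because the base class also calls cache_object internally just to refresh recency), and get_cached bumps the count on every lookup. A sketch of the base-class interface these overrides assume (reconstructed from the calls above, not the project's actual code):

class LRUCache:
    def __init__(self, cache_size):
        self._max_size = cache_size  # assumed attribute name

    def cache_object(self, obj_id, size, xtime, next_line=None, force=True):
        # insert obj_id, or move it to the top of the recency stack;
        # also invoked internally on cache hits
        raise NotImplementedError

    def get_cached(self, obj_id, xtime):
        # return the cached object (refreshing its recency) or a miss marker
        raise NotImplementedError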
Example #31
File: main.py Project: rr8shah/LRU
from LRU import LRUCache
'''
This code creates the cache memory and executes
some of the methods for demonstration purposes.
'''

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
cache.get(1)     # key 1 becomes most recently used
cache.put(3, 3)  # evicts key 2, the least recently used
cache._print()
cache.get(2)     # miss: 2 was evicted
cache.put(4, 4)  # evicts key 1
cache._print()
cache.get(3)
cache.get(4)
cache.get(2)     # miss
cache._del(4)
cache._print()
cache.get(4)     # miss: 4 was deleted
cache.reset()
cache._print()
cache.get(1)     # miss: the cache is empty
Example #32
class SplitLRUCache(AbstractCache.AbstractCache):

    def __init__(self, cache_size, min_obj_size, max_obj_size):

        """
            cache_size in bytes.
        """
        self._max_size = cache_size

        self.stats = CacheStats.CacheStats("LRU", cache_size)
        self.daily_stats = CacheStats.DailyCacheStats(cache_size)

        ts = int(time.time())

        get_size = int(0.333333 * cache_size)
        put_size = int(0.666666 * cache_size)
        self.get_lru = LRUCache(get_size, 0, 0)
        self.put_fifo = LRUCache(put_size, 0, 0)


    def get_cache_stats_total(self):
        return self.stats.to_dict()

    def get_cache_stats_day(self):
        # self.daily_stats.cache_used = self._used_size
        s = self.daily_stats.to_dict()
        self.daily_stats.reset()
        return s

    def get_num_cached_objects(self):
        return self.get_lru.get_num_cached_objects() + self.put_fifo.get_num_cached_objects()

    def is_cached(self, obj_id):
        return self.get_lru.is_cached(obj_id) or self.put_fifo.is_cached(obj_id)

    def is_remembered(self, obj_id):
        return self.is_cached(obj_id)

    def get_free_cache_bytes(self):
        return self.get_lru.get_free_cache_bytes() + self.put_fifo.get_free_cache_bytes() 

    def update_obj_size(self, obj_id, size, delta):
        if self.get_lru.is_cached(obj_id):
            self.get_lru.update_obj_size(obj_id, size, delta)
        if self.put_fifo.is_cached(obj_id):
            self.put_fifo.update_obj_size(obj_id, size, delta)

    def remove_cached(self, obj_id):

        if self.get_lru.is_cached(obj_id):
            self.stats.deleted_objects += 1
            self.stats.cached_objects_current -= 1
            self.daily_stats.deleted_objects += 1
            return self.get_lru.remove_cached(obj_id)

        if self.put_fifo.is_cached(obj_id):
            self.stats.deleted_objects += 1
            self.stats.cached_objects_current -= 1
            self.daily_stats.deleted_objects += 1
            return self.put_fifo.remove_cached(obj_id)

        return None

    def cache_object(self, obj_id, size, xtime, next_line=None, force=True, is_new=False):
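        # NOTE: is_new currently has no effect; both branches route to
        # put_fifo (one of them was presumably meant to target get_lru)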
        if is_new:
            self.put_fifo.cache_object(obj_id, size, xtime, next_line, force)
        else:
            self.put_fifo.cache_object(obj_id, size, xtime, next_line, force)            


    def get_cached(self, obj_id, xtime, next_line=None):
        if self.get_lru.is_cached(obj_id) or self.put_fifo.is_cached(obj_id):
            self.stats.cache_hits += 1
            self.daily_stats.cache_hits += 1
            return True

        self.stats.cache_misses += 1
        self.daily_stats.cache_misses += 1
        return False

    def rename(self, from_obj_id, to_obj_id):
        self.get_lru.rename(from_obj_id, to_obj_id)
        self.put_fifo.rename(from_obj_id, to_obj_id)

    def check_sanity(self):
        return True

    def dump_cache(self, reason):
        print("dump")
Example #33
 def test_put(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     result = len(cache.cache)
     self.assertEqual(result, 1, 'cache should contain exactly one entry after put')