def test_of_size_1(self):
     lru = LRUCache(1)
     lru.set(2, 1)
     self.assertEqual(lru.get(2), 1)
     lru.set(3, 2)
     self.assertIsNone(lru.get(2))
     self.assertEqual(lru.get(3), 2)
 def test_of_size_2(self):
     lru = LRUCache(2)
     lru.set(2, 1)
     lru.set(1, 1)
     self.assertEqual(lru.get(2), 1)
     lru.set(4, 1)
     self.assertIsNone(lru.get(1))
     self.assertEqual(lru.get(2), 1)
Example #3
 def test_least_recent(self):
     cache = LRUCache(2)
     cache.put(1, 1)
     cache.put(2, 2)
     cache.get(1)
     cache.put(2, 4)
     cache.put(3, 3)
     result = 1 not in cache.keys
     self.assertTrue(result,
                     'least recently used key should have been evicted')
Example #4
 def test_update(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     cache.put(1, 'Two')
     result = cache.get(1)
     self.assertEqual(result, 'Two', 'value should be updated by the second put')
Example #5
 def test_get(self):
     cache = LRUCache(2)
     cache.put(1, 'One')
     result = cache.get(1)
     self.assertEqual(result, 'One', 'get should return the stored value')
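The tests above exercise two slightly different interfaces: the first two call set() and expect get() to return None on a miss, while Examples #3 to #5 call put() and inspect a keys attribute. A minimal sketch that would satisfy both, built on collections.OrderedDict, could look as follows; the class body is an assumption, only the method and attribute names come from the tests.

from collections import OrderedDict

class LRUCache:
    """Minimal sketch: fixed-capacity cache that evicts the least recently used key."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.items = OrderedDict()          # ordered from least to most recently used

    @property
    def keys(self):
        return list(self.items)

    def get(self, key):
        if key not in self.items:
            return None                     # the size-1/size-2 tests assert None on a miss
        self.items.move_to_end(key)         # mark as most recently used
        return self.items[key]

    def set(self, key, value):
        if key in self.items:
            self.items.move_to_end(key)
        self.items[key] = value
        if len(self.items) > self.capacity:
            self.items.popitem(last=False)  # evict the least recently used entry

    put = set                               # Examples #3-#5 use put() for the same operation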
Example #6
File: main.py Project: rr8shah/LRU
from LRU import LRUCache
'''
This code creates the cache and exercises some of its
methods for demonstration purposes.
'''

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
cache.get(1)       # key 1 becomes the most recently used
cache.put(3, 3)    # capacity exceeded: evicts key 2, the least recently used
cache._print()
cache.get(2)       # miss: key 2 was evicted
cache.put(4, 4)    # evicts key 1
cache._print()
cache.get(3)
cache.get(4)
cache.get(2)       # miss
cache._del(4)      # explicit deletion of key 4
cache._print()
cache.get(4)       # miss: key 4 was deleted
cache.reset()      # clear the whole cache
cache._print()
cache.get(1)       # miss: cache is empty
Example #7
class Collection(object):
	def __init__(self, name, database):


		# The maximum limit of each JSON file
		self.FILE_SIZE_LIMIT = 100

		# The write-in frequency
		self.WRITE_IN_FREQ = 0.1

		# The size for the query cache file
		self.QUERY_CACHE_SIZE_LIMIT = 30

		# The number of JSON files and the path for the current JSON file
		self.file_num, self.current_file = find_current_file(name, os.getcwd()+'/'+database.db_name)

		self.storage = JSONStorage(self.current_file)


		self.name = name
		self._db = database

		# Cache for the whole collection
		self._cache = {}

		# Cache for the latest file that is being used to write in
		self._current_cache = {}

		# Cache for the most frequently queried items (set up in _load_query_cache)
		# self._query_cache = LRUCache(self.QUERY_CACHE_SIZE_LIMIT)


		# Each entry has a unique id as the primary key

		existing_ids = self._read().keys()

		if existing_ids:
			self._last_id = max(existing_ids)
		else:
			self._last_id = 0

		self._load_query_cache()
		self.update_storage()

	def setting(self, file_size=None, query_cache_size=None, write_freq=None):
		if file_size is not None:
			self.FILE_SIZE_LIMIT = file_size

		if query_cache_size is not None:
			self.QUERY_CACHE_SIZE_LIMIT = query_cache_size

		if write_freq is not None:
			self.WRITE_IN_FREQ = write_freq

	def _load_query_cache(self):

		self.query_storage = JSONStorage(os.getcwd()+'/'+ self._db.db_name + '/' + self.name + '_query.json')
		self._query_cache = LRUCache(self.QUERY_CACHE_SIZE_LIMIT, self.query_storage.read(is_ordered_dict=True))

		# print self._query_cache.items.keys()


	def update_storage(self):
		if len(self._current_cache) >= self.FILE_SIZE_LIMIT:
			# We are here because of the file size limit, so:
			#  - write the current cache to its file
			#  - create the next file
			#  - point the current storage block at the next file
			self.storage.write(dict(self._current_cache))
			# print time.ctime(), 'size limit'
			self._current_cache = {}
			self.file_num += 1
			self.storage = JSONStorage(os.getcwd()+'/'+ self._db.db_name + '/' + self.name + '_' + str(self.file_num) + '.json')
		else:
			# Otherwise the timer expired: do the regular write-in.
			# print time.ctime(), 'regular write-in', len(self._current_cache)

			self.storage.write(dict(self._current_cache))
			self.query_storage.write(self._query_cache.items)
			# print self._query_cache.items

		# Set the timer again. Make it periodic.
		self.t = threading.Timer(self.WRITE_IN_FREQ, self.update_storage)
		self.t.start()

	def process_element(self, func, cond=None, eid=None):
		data = self._read()

		if eid is not None:
			for i in eid:
				func(data, i)

		else:
			for eid in list(data):
				if cond(data[eid]):
					func(data, eid)

		self._write(data)

	def _get_next_id(self):
		current = self._last_id + 1
		self._last_id = current

		return current

	def _read(self):

		if not self._cache:
			# When the collection is first read, load it all into the cache at once.
			self._read_all()
		return self._cache


	def _read_all(self):
		for i in range(1, self.file_num+1):
			raw = JSONStorage(find_nth_file(self.name, os.getcwd()+'/'+self._db.db_name, i)).read()
			for key in list(raw):
				eid = int(key)
				self._cache[eid] = Element(raw[key], eid)
				if i == self.file_num:
					self._current_cache[eid] = Element(raw[key], eid)

	def _write(self, values):

		self.storage.write(values)

	def __len__(self):
		return len(self._read())

	def all(self):
		return list(self._read().values())

	def insert(self, value):

		eid = self._get_next_id()
		self._cache[eid] = value
		self._current_cache[eid] = value


		'''
		If the file in-use reaches the size limit, we do the following:
		1. cancel the timer
		2. enter the update_storage
		Otherwise, we can just wait until the timer expires
		'''
		if len(self._current_cache) >= self.FILE_SIZE_LIMIT:
			self.t.cancel()
			self.update_storage()

	def insert_multiple(self, values):
		for value in values:
			self.insert(value)

	def update(self, fields, cond=None, eids=None):

		'''
		Update can be performed by specifying eids or cond, or both.
		If neither is given, every element is updated.
		'''

		if cond is None and eids is None:
			eids = list(self._cache.keys())

		if cond is not None:
			eids = self._select_to_be_updated_eid(cond, eids)

		pages_eid = self._group_eids(eids)

		self._update_cache(eids, pages_eid, fields)
		self._update_disk(pages_eid, fields)

	def _update_disk(self, pages_eid, fields, remove=False):
		# do the update for the files
		temp_caches = {}

		for page in pages_eid:
			temp_caches[page] = self._read_page(page)
			temp_caches[page] = self._write_in_page(fields, pages_eid[page], temp_caches[page], remove)

		for page in temp_caches:
			self._update_page(page, temp_caches[page])

	def _group_eids(self, eids):
		'''
		group eids based on which page it belongs to
		'''

		pages_eid = {}
		for i in eids:
			page = int(math.ceil(float(i)/float(self.FILE_SIZE_LIMIT)))
			if page not in pages_eid:
				pages_eid[page] = [i]
			else:
				pages_eid[page].append(i)

		return pages_eid
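
	# Worked example (with the default FILE_SIZE_LIMIT of 100): eid 5 maps to
	# page ceil(5/100) == 1 and eid 150 to page ceil(150/100) == 2, so
	# _group_eids([5, 150]) returns {1: [5], 2: [150]}.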

	def _select_to_be_updated_eid(self, cond, eids):

		# Given a condition, return the list of eids that satisfy it
		filtered_ids = []

		if eids is None:
			# if eids are not specified, check every cached eid
			for eid in self._cache:
				if cond(self._cache[eid]):
					filtered_ids.append(eid)
		else:
			# otherwise check whether each given eid satisfies the condition
			for eid in eids:
				if cond(self._cache[eid]):
					filtered_ids.append(eid)
		return filtered_ids

	def _update_cache(self, eids, pages_eid, fields=None, remove=False):
		# update the cache first before write-in to the file

		for i in eids:
			if remove:
				self._cache.pop(i, None)
			else:
				self._cache[i].update(fields)
		for page in pages_eid:
			if page == self.file_num:
				for eid in pages_eid[page]:
					if remove:
						self._current_cache.pop(eid, None)
					else:
						self._current_cache[eid].update(fields)


	def _read_page(self, page):

		'''
		Used for update()
		read every related page
		return a temp_cache
		'''

		page_content = JSONStorage(find_nth_file(self.name, os.getcwd()+'/'+self._db.db_name, page)).read()
		temp_cache = {}
		for key in list(page_content):
			eid = int(key)
			temp_cache[eid] = Element(page_content[key], eid)
		return temp_cache

	def _write_in_page(self, fields, eids, temp_cache, remove=False):
		# write in the updated items
		for eid in eids:
			if remove:
				temp_cache.pop(eid, None)
			else:
				try:
					temp_cache[eid].update(fields)
				except KeyError:
					pass
		return temp_cache

	def _update_page(self, page, cache):

		'''
		Write in the temp_cache given by _read_page
		'''

		page_content = JSONStorage(find_nth_file(self.name, os.getcwd()+'/'+self._db.db_name, page))
		page_content.write(cache)

	def remove(self, cond=None, eids=None):

		if cond is None and eids is None:
			# no filter given: remove all elements
			# (copy the keys, since _update_cache pops from the dict while iterating)
			eids = list(self._cache.keys())

		if cond is not None:
			eids = self._select_to_be_updated_eid(cond, eids)

		pages_eid = self._group_eids(eids)

		self._update_cache(eids, pages_eid, fields=None, remove=True)

		self._update_disk(pages_eid, fields=None, remove=True)

	def search(self, cond, keys=None):

		elements = self._query_cache.get(str(cond))

		if elements == -1:
			# cache miss: scan the whole collection and cache the result
			elements = [e for e in self.all() if cond(e)]
			self._query_cache.set(str(cond), elements)

		if keys is not None:
			# project each matching element down to the requested keys
			return [{key: e[key] for key in keys} for e in elements]
		return elements

	def clear(self):
		self.remove()

	def get(self, cond=None, eid=None):

		if eid is not None:
			read_all = self._read()
			return read_all[eid]

		elements = self.search(cond)

		if elements == []:
			return {}
		return elements[0]

	def close(self):
		# When closing the database, flush what is still in the cache
		# and stop the periodic write-in.
		self.t.cancel()
		self.update_storage()

		self._cache.clear()
		self._current_cache.clear()

		# update_storage() re-arms the timer, so cancel it once more
		self.t.cancel()
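Note that this Collection assumes a different LRUCache contract from the sketch above: the constructor takes a preloaded OrderedDict (_load_query_cache), get() returns -1 on a miss (which search() checks for), and the raw items mapping is written back to disk in update_storage(). A hedged sketch of that contract, inferred only from the call sites in this example:

from collections import OrderedDict

class LRUCache:
    """Sketch of the query-cache surface Collection relies on (inferred, not the project's code)."""

    def __init__(self, capacity, items=None):
        self.capacity = capacity
        # optionally preloaded from the <name>_query.json file
        self.items = OrderedDict(items or {})

    def get(self, key):
        if key not in self.items:
            return -1                       # search() treats -1 as a cache miss
        self.items.move_to_end(key)
        return self.items[key]

    def set(self, key, value):
        if key in self.items:
            self.items.move_to_end(key)
        self.items[key] = value
        if len(self.items) > self.capacity:
            self.items.popitem(last=False)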
Example #8
class Node():
    def __init__(self, fellow, my_ip):
        self.addr = my_ip
        self.fellow = fellow
        self.lock = threading.Lock()
        self.DB = {}
        self.log = []
        self.staged = None
        self.term = 0
        self.status = FOLLOWER
        self.majority = ((len(self.fellow) + 1) // 2) + 1
        self.voteCount = 0
        self.commitIdx = 0
        self.timeout_thread = None
        self.init_timeout()
        self.capacity = 3
        self.cache = LRUCache(capacity=self.capacity)

    # increment only when we are a candidate and receive a positive vote
    # change status to LEADER and start heartbeat as soon as we reach majority
    def incrementVote(self):
        self.voteCount += 1
        if self.voteCount >= self.majority:
            print(f"{self.addr} becomes the leader of term {self.term}")
            self.status = LEADER
            self.startHeartBeat()

    # vote for myself, increase term, change status to candidate
    # reset the timeout and start sending vote requests to followers
    def startElection(self):
        self.term += 1
        self.voteCount = 0
        self.status = CANDIDATE
        self.init_timeout()
        self.incrementVote()
        self.send_vote_req()

    # ------------------------------
    # ELECTION TIME CANDIDATE

    # spawn a thread per follower to request votes until we get replies
    def send_vote_req(self):
        # TODO: use map later for better performance
        # we keep asking the addresses that haven't voted yet
        # until everyone has voted or I am the leader
        for voter in self.fellow:
            threading.Thread(target=self.ask_for_vote,
                             args=(voter, self.term)).start()

    # request a vote from another server during the given election term
    def ask_for_vote(self, voter, term):

        # need to include self.commitIdx; only an up-to-date candidate can win
        #debugger (self.addr+'+'+voter)
        channel = grpc.insecure_channel(voter)
        stub = mykvserver_pb2_grpc.KVServerStub(channel)

        message = mykvserver_pb2.VoteMessage()
        #debugger (stub.VoteRequest(message),2)

        message.term = term
        message.commitIdx = self.commitIdx
        if self.staged:
            message.staged.act = self.staged['act']
            message.staged.key = self.staged['key']
            message.staged.value = self.staged['value']

        while self.status == CANDIDATE and self.term == term:

            reply = stub.VoteRequest(message)
            if reply:
                #choice = reply.json()["choice"]
                choice = reply.choice
                #print(f"RECEIVED VOTE {choice} from {voter}")
                if choice and self.status == CANDIDATE:
                    self.incrementVote()
                elif not choice:
                    # they declined because I am either out-of-date or not on the newest term;
                    # update my term and terminate the vote_req
                    #term = reply.json()["term"]
                    term = int(reply.term)
                    if term > self.term:
                        self.term = term
                        self.status = FOLLOWER
                    # fix out-of-date needed
                break

    # ------------------------------
    # ELECTION TIME FOLLOWER

    # some other server is asking for our vote
    def decide_vote(self, term, commitIdx, staged):
        # new election:
        # decline any candidate that is not up to date,
        # but always keep our own term updated; do not reset the timeout while deciding
        # also vote only for someone that has our staged version or a newer one
        if (self.term < term and self.commitIdx <= commitIdx
                and (staged or self.staged == staged)):
            self.reset_timeout()
            self.term = term
            return True, self.term
        else:
            return False, self.term

    # ------------------------------
    # START PRESIDENT

    def startHeartBeat(self):
        #print("Starting HEARTBEAT")
        if self.staged:
            # we have something staged at the beginning of our leadership
            # we treat it as a newly received payload and spread it around
            self.handle_put(self.staged)

        for each in self.fellow:
            t = threading.Thread(target=self.send_heartbeat, args=(each, ))
            t.start()

    def update_follower_commitIdx(self, follower):
        channel = grpc.insecure_channel(follower)
        stub = mykvserver_pb2_grpc.KVServerStub(channel)
        message = mykvserver_pb2.HBMessage()
        message.term = self.term
        message.addr = self.addr
        message.action = 'commit'
        message.payload.act = self.log[-1]['act']
        message.payload.key = self.log[-1]['key']
        message.commitIdx = self.commitIdx
        if self.log[-1]['value']:
            message.payload.value = self.log[-1]['value']
        reply = stub.HeartBeat(message)

    def send_heartbeat(self, follower):
        # check whether the follower has the same commit index; if not, tell it to catch up to our log
        if self.log:
            self.update_follower_commitIdx(follower)

        while self.status == LEADER:
            start = time.time()
            channel = grpc.insecure_channel(follower)
            stub = mykvserver_pb2_grpc.KVServerStub(channel)

            ping = mykvserver_pb2.JoinRequest()
            #print(ping)
            if ping:
                if follower not in self.fellow:
                    self.fellow.append(follower)
                message = mykvserver_pb2.HBMessage()
                message.term = self.term
                message.addr = self.addr
                reply = stub.HeartBeat(message)
                if reply:
                    self.heartbeat_reply_handler(reply.term, reply.commitIdx)
                delta = time.time() - start
                # keep the heartbeat period constant even if network latency varies
                # (HB_TIME is in milliseconds, delta is in seconds)
                time.sleep(max(0, cfg.HB_TIME / 1000 - delta))
            else:
                for index in range(len(self.fellow)):
                    if self.fellow[index] == follower:
                        self.fellow.pop(index)
                        print('Server {} lost connect'.format(follower))
                        break

    # we may step down when we get a reply
    def heartbeat_reply_handler(self, term, commitIdx):
        # I thought I was the leader, but a follower told me
        # there is a newer term, so I now step down
        if term > self.term:
            self.term = term
            self.status = FOLLOWER
            self.init_timeout()

        # TODO logging replies

    # ------------------------------
    # FOLLOWER STUFF

    def reset_timeout(self):
        self.election_time = time.time() + utils.random_timeout()

    # /heartbeat

    def heartbeat_follower(self, msg):
        # weird case if 2 servers are PRESIDENT of the same term:
        # both receive a heartbeat
        # and both will step down

        term = msg["term"]
        if self.term <= term:
            self.leader = msg["addr"]
            self.reset_timeout()
            # in case I am not follower
            # or started an election and lost it
            if self.status == CANDIDATE:
                self.status = FOLLOWER
            elif self.status == LEADER:
                self.status = FOLLOWER
                self.init_timeout()
            # i have missed a few messages
            if self.term < term:
                self.term = term

            # handle client request
            if "action" in msg:
                print("received action", msg)
                action = msg["action"]
                # logging after first msg
                if action == "log":
                    payload = msg["payload"]
                    self.staged = payload
                    #print(self.staged)
                # proceeding staged transaction
                elif self.commitIdx <= msg["commitIdx"]:
                    #print('update staged')
                    if not self.staged:
                        self.staged = msg["payload"]
                    self.commit()

        return self.term, self.commitIdx

    # initiate the timeout thread, or reset it
    def init_timeout(self):
        self.reset_timeout()
        # safety guarantee: the timeout thread may outlive the election
        if self.timeout_thread and self.timeout_thread.is_alive():
            return
        self.timeout_thread = threading.Thread(target=self.timeout_loop)
        self.timeout_thread.start()

    # the timeout function
    def timeout_loop(self):
        # only stop timeout thread when winning the election
        while self.status != LEADER:
            delta = self.election_time - time.time()
            if delta < 0:
                self.startElection()
            else:
                time.sleep(delta)

    def handle_get(self, payload):
        print('handle_getting ', payload)
        key = payload["key"]
        act = payload["act"]
        if act == 'get':
            print("getting", payload)
            cache_res = self.cache.get(key)
            if cache_res is not None:
                print('result in cache')
                payload["value"] = cache_res
                return payload
            elif key in self.DB:
                print('result in db')
                payload["value"] = self.DB[key]
                return payload
        '''
        elif act == 'del':
            print('deleting',payload)
            if key in self.DB:
                self.DB[key] = None
                return payload
        '''
        return None

    # takes a message and an array of confirmations and spreads it to the followers
    # if it is a commit, it releases the lock
    def spread_update(self, message, confirmations=None, lock=None):
        for i, each in enumerate(self.fellow):

            channel = grpc.insecure_channel(each)
            stub = mykvserver_pb2_grpc.KVServerStub(channel)
            m = mykvserver_pb2.HBMessage()
            m.term = message['term']
            m.addr = message['addr']
            if message['payload'] is not None:
                #print(message['payload'])
                m.payload.act = message['payload']['act']
                m.payload.key = message['payload']['key']
                m.payload.value = message['payload']['value']
            #m.action = 'commit'
            if message['action']:
                m.action = message['action']
            m.commitIdx = self.commitIdx
            r = stub.HeartBeat(m)
            if r and confirmations:
                # print(f" - - {message['action']} by {each}")
                confirmations[i] = True
        if lock:
            lock.release()

    def handle_put(self, payload):
        #print("putting", payload)
        # lock to only handle one request at a time
        self.lock.acquire()
        self.staged = payload
        waited = 0
        log_message = {
            "term": self.term,
            "addr": self.addr,
            "payload": payload,
            "action": "log",
            "commitIdx": self.commitIdx
        }

        # spread the log to everyone
        log_confirmations = [False] * len(self.fellow)
        threading.Thread(target=self.spread_update,
                         args=(log_message, log_confirmations)).start()
        while sum(log_confirmations) + 1 < self.majority:
            waited += 0.0005
            time.sleep(0.0005)
            if waited > cfg.MAX_LOG_WAIT / 1000:
                print(f"waited {cfg.MAX_LOG_WAIT} ms, update rejected:")
                self.lock.release()
                return False
        # reach this point only if a majority has replied and tell everyone to commit
        commit_message = {
            "term": self.term,
            "addr": self.addr,
            "payload": payload,
            "action": "commit",
            "commitIdx": self.commitIdx
        }
        #print('commit to all')
        can_delete = self.commit()
        threading.Thread(target=self.spread_update,
                         args=(commit_message, None, self.lock)).start()
        print(
            "majority reached, replied to client, sending message to commit, message:",
            commit_message)
        return can_delete

    # put staged key-value pair into local database
    def commit(self):
        self.commitIdx += 1
        self.log.append(self.staged)
        key = self.staged["key"]
        act = self.staged["act"]
        value = None
        can_delete = True
        cache_update = False
        #if self.staged['value'] == 'None':
        #    self.DB[key]= None
        #    key = None
        #    can_delete = False
        if act == 'put':
            #print('it\'s a put transaction')
            value = self.staged["value"]
            self.DB[key] = value
            cache_update = True
        elif act == 'del':
            #print('it\' s a delete transaction')
            if self.DB.get(key) is not None:
                self.DB[key] = None
            else:
                can_delete = False
            cache_update = True
        if cache_update:
            # put the newly updated key-value pair into the local cache
            self.cache.set(key, value)
            self.cache.getallkeys()

        # empty the staged so we can vote accordingly if there is a tie
        self.staged = None
        return can_delete
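Example #8 assumes yet another variant of the cache: the constructor takes a capacity keyword, get() returns None on a miss (handle_get checks `is not None`), and there is a getallkeys() helper that commit() calls and ignores. It differs from the sketch after Example #7 only in the miss sentinel and that helper, whose body below is an assumption:

from collections import OrderedDict

class LRUCache:
    """Sketch of the cache surface Node uses; returns None instead of -1 on a miss."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.items = OrderedDict()

    def get(self, key):
        if key not in self.items:
            return None                     # handle_get() checks `is not None`
        self.items.move_to_end(key)
        return self.items[key]

    def set(self, key, value):
        if key in self.items:
            self.items.move_to_end(key)
        self.items[key] = value
        if len(self.items) > self.capacity:
            self.items.popitem(last=False)

    def getallkeys(self):
        # assumed to just report the current keys; commit() ignores the result
        return list(self.items)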
Example #9
 def test_put(self):
     lru = LRUCache(3)
     self._put(lru)
     lru.put(1, 54)
     assert lru.get(1) == 54
     assert lru.get(2) == 20
Example #10
 def test_reset(self):
     lru = LRUCache(3)
     self._put(lru)
     lru.reset()
     assert lru.get(1) == -1
     assert lru.get(2) == -1
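Examples #9 and #10 expect get() to return -1 on a miss, as in the Example #7 sketch, plus put() and a reset() method (the _put helper both tests call is not shown here). Extending that sketch under the same assumptions would only take:

    put = set                               # alias, as in the first sketch

    def reset(self):
        # drop every entry; the configured capacity is unchanged
        self.items.clear()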