def main():
    verbose = verbose_mode(sys.argv)
    input_handler = InputHandler()
    output_handler = OutputHandler("results.txt")
    memory_blocks, references = input_handler.parse_file(sys.argv[1])

    fifo = FIFO(copy.deepcopy(memory_blocks), copy.deepcopy(references))
    fifo_stats = fifo.run()
    fifo_stats_str = get_stats_str("FIFO", fifo_stats)

    otm = OTM(copy.deepcopy(memory_blocks), copy.deepcopy(references))
    otm_stats = otm.run()
    otm_stats_str = get_stats_str("OTM", otm_stats)

    lru = LRU(copy.deepcopy(memory_blocks), copy.deepcopy(references))
    lru_stats = lru.run()
    lru_stats_str = get_stats_str("LRU", lru_stats)

    output_handler.write_to_file(fifo_stats_str, otm_stats_str, lru_stats_str)
    if verbose:
        print(fifo_stats_str, end="")
        print(otm_stats_str, end="")
        print(lru_stats_str)
def selectRange(self, startTimestamp, endTimestamp, keyword):
    [startFileNumber, startBucket] = self.getNames(startTimestamp)
    [endFileNumber, endBucket] = self.getNames(endTimestamp)
    aggregateCount = 0
    # If timestamps span more than a day, we need to ensure that we get all
    # the buckets in the range.
    for fileNumber in range(startFileNumber, endFileNumber + 1):
        if fileNumber == startFileNumber:
            startB = startBucket
            if startFileNumber == endFileNumber:
                endB = endBucket
            else:
                # Hardcoded for now; can call setWindow if we want this to
                # work for different time ranges.
                endB = 287
        elif fileNumber == endFileNumber:
            startB = 0
            endB = endBucket
        else:
            startB = 0
            endB = 287
        for bucketNumber in range(startB, endB + 1):
            bucketNumber = str(bucketNumber)
            if LRU.get(str(fileNumber)) == -1:
                # Cache miss: load the day file and cache the parsed map.
                with open(str(fileNumber) + '.txt') as data_file:
                    dataMap = json.load(data_file)
                    LRU.set(str(fileNumber), dataMap)
            else:
                dataMap = LRU.get(str(fileNumber))
            if bucketNumber in dataMap and str(keyword) in dataMap[bucketNumber]:
                aggregateCount += dataMap[bucketNumber][str(keyword)]
    return str(aggregateCount)
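# The query methods in this section call a module-level LRU object as a page
# cache for parsed day files: LRU.get(key) returns -1 on a miss, and
# LRU.set(key, value) stores the loaded JSON map. The original LRU module is
# not shown, so the sketch below is a hypothetical stand-in: only the get/set
# semantics are taken from the callers; the fixed capacity and OrderedDict
# bookkeeping are assumptions.
from collections import OrderedDict

class _PageCache:
    def __init__(self, capacity=16):  # capacity is an assumption
        self.capacity = capacity
        self.pages = OrderedDict()

    def get(self, key):
        if key not in self.pages:
            return -1  # miss sentinel the callers test against
        self.pages.move_to_end(key)  # refresh recency on a hit
        return self.pages[key]

    def set(self, key, value):
        self.pages[key] = value
        self.pages.move_to_end(key)
        if len(self.pages) > self.capacity:
            self.pages.popitem(last=False)  # evict the least recently used

LRU = _PageCache()  # module-level instance, matching the LRU.get/LRU.set calls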
def selectRangeAndInterval(self, startTime, endTime, interval, keyword):
    stime = time.time()
    print("start time in select range and interval", stime)
    print("start time in select range and interval process", time.process_time())
    interval = interval // 300000  # milliseconds -> 5-minute units
    tick = interval * 60
    numBuckets = interval // 5
    bucketMod = self.setWindow(interval)
    [startFileNumber, startBucket] = self.getNames(startTime)  # convert bucket to string
    [endFileNumber, endBucket] = self.getNames(endTime)
    print(startFileNumber, endFileNumber)
    # If timestamps span more than a day, we need to ensure that we get all
    # the buckets in the range.
    t = startTime
    finalList = []
    for fileNumber in range(startFileNumber, endFileNumber + 1):
        if LRU.get(str(fileNumber)) == -1:
            with open(str(fileNumber) + '.txt') as data_file:
                try:
                    dataMap = json.load(data_file)
                    LRU.set(str(fileNumber), dataMap)
                except ValueError:
                    # Unreadable day file: fill the remaining range with zeros.
                    for i in range(t, endTime + tick, tick):
                        finalList.append((str(i), str(0), keyword))
                    print("end time", time.time())
                    print("end time", time.process_time())
                    return finalList
        dataMap = LRU.get(str(fileNumber))
        if fileNumber == startFileNumber:
            startB = startBucket
            if startFileNumber == endFileNumber:
                endB = endBucket
            else:
                endB = bucketMod - 1
        elif fileNumber == endFileNumber:
            startB = 0
            endB = endBucket
        else:
            startB = 0
            endB = bucketMod - 1
        # Aggregate numBuckets consecutive 5-minute buckets per output tick.
        for bucketNumber in range(startB, endB + 1, numBuckets):
            total_count = 0
            for bucket in range(bucketNumber, bucketNumber + numBuckets):
                if str(bucket) in dataMap and str(keyword) in dataMap[str(bucket)]:
                    total_count += dataMap[str(bucket)][str(keyword)]
            finalList.append((str(t), str(total_count), keyword))
            t = t + tick
    if finalList == []:
        for i in range(t, endTime + tick, tick):
            finalList.append((str(i), str(0), keyword))
        print("end time", time.time())
        print("end time", time.process_time())
        return finalList
    print("end time", time.time())
    print("end time", time.process_time())
    return finalList
def test_get(self):
    obj = LRU('a', 1)
    obj.put('a')
    obj.put('b')
    # obj.put('c')
    # print(obj.l)
    self.assertEqual(obj.get(), False)
def testSetSize(self):
    obj = LRU(2)
    self.assertEqual(obj.listSize, 2)
    obj.setSize(4)
    # Walk the circular list and count the nodes.
    head = obj.head.next
    count = 1
    while head != obj.head:
        count += 1
        head = head.next
    self.assertEqual(count, 4)
class LRUTest:
    def __init__(self):
        self.lru = LRU()

    def put(self, key):
        return self.lru.put(key)

    def get(self):
        return self.lru.get()

    def get_cache(self):
        return self.lru.get_cache()
def selectRangeForDisplay(self, startTimestamp, endTimestamp, keyword):
    # Sample timestamp is 1449186990 (assuming it was divided by 1000 already).
    print("start Time: ", startTimestamp)
    if endTimestamp < startTimestamp:
        return []
    tick = 5 * 60  # seconds to add - assuming window size is 5 here!
    bucketMod = self.setWindow(5)
    print("bucketMod", bucketMod)
    [startFileNumber, startBucket] = self.getNames(startTimestamp)  # convert bucket to string
    [endFileNumber, endBucket] = self.getNames(endTimestamp)
    print("fileNum", startFileNumber, endFileNumber)
    # If timestamps span more than a day, we need to ensure that we get all
    # the buckets in the range.
    t = startTimestamp
    finalList = []
    for fileNumber in range(startFileNumber, endFileNumber + 1):
        if fileNumber == startFileNumber:
            startB = startBucket
            if startFileNumber == endFileNumber:
                endB = endBucket
            else:
                endB = bucketMod - 1
        elif fileNumber == endFileNumber:
            startB = 0
            endB = endBucket
        else:
            startB = 0
            endB = bucketMod - 1
        for bucketNumber in range(startB, endB + 1):
            bucketNumber = str(bucketNumber)
            if LRU.get(str(fileNumber)) == -1:
                with open(str(fileNumber) + '.txt') as data_file:
                    try:
                        dataMap = json.load(data_file)
                    except ValueError:
                        # Unreadable day file: fill the remaining range with zeros.
                        for i in range(t, endTimestamp + 300, 300):
                            finalList.append((str(i), str(0), keyword))
                        return finalList
                    LRU.set(str(fileNumber), dataMap)
            else:
                dataMap = LRU.get(str(fileNumber))
            if bucketNumber in dataMap and str(keyword) in dataMap[bucketNumber]:
                count = dataMap[bucketNumber][str(keyword)]
                finalList.append((str(t), str(count), keyword))
            else:
                finalList.append((str(t), str(0), keyword))
            t = t + tick
    if finalList == []:
        for i in range(t, endTimestamp + 300, 300):
            finalList.append((str(i), str(0), keyword))
        return finalList
    return finalList
def testGetPut(self):
    obj = LRU(1)
    obj.put(1, 2)
    self.assertEqual(obj.get(1), 2)
    obj.put(1, 4)
    # check if value is updated
    self.assertEqual(obj.get(1), 4)
    # check if default value is returned if key isn't there
    self.assertEqual(obj.get(2, "not found"), "not found")
def testLRUItemIsActuallyRemoved(self):
    obj = LRU(4)
    obj.put(1, 1)
    obj.put(2, 1)
    obj.put(3, 1)
    obj.put(4, 1)
    obj.put(1, 1)  # 2 becomes LRU
    obj.put(5, 1)
    # now get(2) should return None
    self.assertEqual(obj.get(2), None)
@classmethod
def setUpClass(cls):
    max_cache = 4
    cls.lru = LRU(max_cache)
    cls.lruFull = LRU(max_cache)
    cls.lruFull.put('1', 'Test Get 1')
    cls.lruFull.put('2', 'Test Get 2')
    cls.lruFull.put('3', 'Test Get 3')
    cls.lruFull.put('4', 'Test Get 4')
    cls.lruod = LRUOD(max_cache)
    cls.lruodFull = LRUOD(max_cache)
    cls.lruodFull.put('1', 'Test Get 1')
    cls.lruodFull.put('2', 'Test Get 2')
    cls.lruodFull.put('3', 'Test Get 3')
    cls.lruodFull.put('4', 'Test Get 4')
def main():
    # initializing with size 3
    test_cache = LRU(3)
    # testing with various values
    test_cache.find(1)
    test_cache.find(2)
    test_cache.find(3)
    test_cache.find(4)
    test_cache.find(1)
    test_cache.find(3)
    # printing the cache after the final transaction
    test_cache.print_cache()
def testChangeNodeToMostRecentlyUsed(self):
    obj = LRU(3)
    obj.put(1, 1)
    obj.put(2, 1)
    obj.put(3, 1)
    node = obj.mapping[1]  # points to 1
    obj.changeNodeToMostRecentlyUsed(node)
    self.assertEqual(obj.head, node.next)
def select(self, timestamp, keyword):
    [filename, bucket] = self.getNames(timestamp)
    bucket = str(bucket)
    keyword = str(keyword)
    # First get the associated page with this data & timestamp.
    if LRU.get(str(filename)) == -1:
        with open(str(filename) + '.txt') as data_file:
            dataMap = json.load(data_file)
            LRU.set(str(filename), dataMap)
    else:
        dataMap = LRU.get(str(filename))
    if bucket in dataMap and keyword in dataMap[bucket]:
        return [timestamp, dataMap[bucket][keyword]]
    else:
        return [timestamp, 0]
def test_get_cache(self):
    obj = LRU('a', 1)
    obj.put('a')
    obj.put('b')
    obj.put('c')
    obj.put('d')
    self.assertEqual(obj.get_cache(), ['b', 'c', 'd'])
def testAddTailNode(self):
    obj = LRU(1)
    obj.addTailNode(1)  # add 1 node
    self.assertEqual(obj.listSize, 2)
    r = random.randrange(10)
    obj.addTailNode(r)  # add r nodes
    self.assertEqual(obj.listSize, r + 2)
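# The two linked-list tests above only pin down the node count: LRU(1) starts
# with listSize 1 and addTailNode(n) grows it by n. The internals are not
# shown in the original, so this is one plausible sketch, assuming a circular
# doubly linked list where new nodes are spliced in just before head.
class Node:
    def __init__(self):
        self.key = self.value = None
        self.prev = self.next = self  # self-linked until spliced in

class _CircularList:
    def __init__(self):
        self.head = Node()
        self.listSize = 1  # the head node counts toward listSize

    def addTailNode(self, n):
        for _ in range(n):
            node = Node()
            node.prev = self.head.prev  # old tail
            node.next = self.head
            self.head.prev.next = node
            self.head.prev = node  # node becomes the new tail
        self.listSize += n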
def testClear(self):
    obj = LRU(2)
    obj.put(1, 2)
    obj.put(3, 4)
    self.assertEqual(len(obj), 2)
    obj.clear()
    self.assertEqual(len(obj), 0)
def main():
    l = [2, 9, 1, 7, 3, 5, 7, 6, 9, 2, 1]
    cache = LRU()
    for each in l:
        cache.put(each)
    print(cache.get_cache())
    assert cache.get_cache() == [1, 2, 9, 6, 7, 5, 3]
def insert(self, timestamp, keyword, count):
    # Assume values are coming in the Python form:
    # [('clinton', 1), ('sanders', 1)]
    # Do we want to store individual timestamps as well?
    filename = timestamp // 86400
    tempTime = timestamp // 300
    bucket = tempTime % 288
    print(filename)
    if not os.path.isfile(str(filename) + '.txt'):
        print('file doesnt exist')
        # Create the day file with one empty bucket per 5-minute window.
        dataMap = {}
        for i in range(0, 288):
            dataMap[i] = {}
        with open(str(filename) + '.txt', 'w') as data_file:
            json.dump(dataMap, data_file)
    if LRU.get(str(filename)) == -1:
        with open(str(filename) + '.txt') as data_file:
            dataMap = json.load(data_file)
            LRU.set(str(filename), dataMap)
    else:
        dataMap = LRU.get(str(filename))
    print("current bucket", bucket)
    print("previous bucket", self.previousBucket)
    if bucket != self.previousBucket:
        # A new bucket has started: fold the finished bucket's counts into
        # the running statistics for each candidate. Use prevCount here so
        # the count argument isn't clobbered before it is stored below.
        self.incrementalCount[timestamp // 300] = {}
        for candidate in self.candidateList:
            if candidate in dataMap[str(self.previousBucket)]:
                prevCount = dataMap[str(self.previousBucket)][candidate]
                squaredCount = prevCount ** 2
            else:
                prevCount = 0
                squaredCount = 0
            self.totalCounts[str(candidate)] += prevCount
            self.squaredCounts[str(candidate)] += squaredCount
            newMean = self.updateMean(self.counts[str(candidate)] + 1,
                                      self.runningMean[str(candidate)], prevCount)
            newVar = self.updateVar(self.counts[str(candidate)] + 1,
                                    self.totalCounts[str(candidate)],
                                    self.squaredCounts[str(candidate)])
            self.runningVar[str(candidate)] = newVar
            self.runningMean[str(candidate)] = newMean
            self.counts[str(candidate)] += 1
            if (timestamp // 300 - 1) not in self.incrementalCount:
                self.incrementalCount[timestamp // 300][candidate] = 0
            else:
                self.incrementalCount[timestamp // 300][candidate] = \
                    self.incrementalCount[timestamp // 300 - 1][candidate] + prevCount
        self.incrementalCount["lastTime"] = timestamp // 300
        print("COUNTS: ", self.counts)
        print("RUNNING AVERAGE:", self.runningMean)
        print("RUNNING VAR:", self.runningVar)
        print("INCREMENTAL COUNT", self.incrementalCount)
    self.previousBucket = bucket
    keywords = dataMap[str(bucket)]
    bucket = str(bucket)
    if keyword not in keywords:
        print('not in keyword so adding')
        dataMap[bucket][keyword] = count
    else:
        # counts per bucket
        dataMap[bucket][keyword] += count
    # To get the aggregate counts for each keyword for a day.
    if keyword not in dataMap:
        dataMap[keyword] = count
    else:
        dataMap[keyword] += count
    with open(str(filename) + '.txt', 'w') as outfile:
        print('writing to json output')
        json.dump(dataMap, outfile)
    # We don't want to load it every time.
    return True
def main():
    lru_obj = LRU(3)
    # tc1
    assert lru_obj.get('a') == -1
    # tc2
    lru_obj.put('a', 1)
    lru_obj.put('b', 2)
    lru_obj.put('c', 3)
    assert lru_obj.get('a') == 1, 'Wrong Value'
    # tc3
    assert lru_obj.get_cache() == {'a': 1, 'b': 2, 'c': 3}, 'Cache is not updated properly'
    # tc4
    lru_obj.put('d', 4)
    # tc5
    assert len(lru_obj.get_cache()) == 3, 'capacity crossed'
    # tc6
    assert lru_obj.get('b') == -1, 'Least Recently Used is not being removed when capacity is crossed'
    print("All test cases passed!!")
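# The assertions above pin down the interface: get returns -1 on a miss, a hit
# refreshes recency, put evicts the least recently used entry once capacity is
# exceeded, and get_cache exposes the mapping. A minimal sketch that satisfies
# them, assuming an OrderedDict backing store (the original LRU class is not
# shown):
from collections import OrderedDict

class LRUSketch:
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if key not in self.cache:
            return -1  # miss sentinel asserted in tc1 and tc6
        self.cache.move_to_end(key)  # a hit makes the key most recently used
        return self.cache[key]

    def put(self, key, value):
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        if len(self.cache) > self.capacity:
            self.cache.popitem(last=False)  # drop the least recently used

    def get_cache(self):
        return self.cache  # an OrderedDict compares equal to a plain dict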
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from Utils import Utils  # Import Utils module.
from FIFO import FIFO    # Import FIFO module.
from LRU import LRU      # Import LRU module.
from OTM import OTM      # Import OTM module.

if __name__ == "__main__":   # Runs only if main was not imported as a module.
    input = Utils.getData()  # Get data from the user.
    fifo = FIFO(input)       # Create a FIFO object.
    lru = LRU(input)         # Create an LRU object.
    otm = OTM(input)         # Create an OTM object.
    fifo.run()               # Run the algorithm.
    lru.run()                # Run the algorithm.
    otm.run()                # Run the algorithm.
    print(fifo)  # Show the object information (missing pages).
    print(otm)   # Show the object information (missing pages).
    print(lru)   # Show the object information (missing pages).
def test_put(self):
    obj = LRU('a', 1)
    self.assertEqual(obj.put('a'), 'done')
def test_lru_size(self):
    # Constructing an LRU with a non-integer or negative size should raise,
    # so no object is ever created. Each bad constructor call needs its own
    # assertRaises context; code after the first raise would never run.
    with self.assertRaises(ValueError):
        LRU('banana')
    with self.assertRaises(ValueError):
        LRU(-5)
from LRU import LRU
import random

# Pick a non-zero size so the cache can actually hold an entry.
random_cache_size = random.randrange(1, 10)
print("Initializing cache of size", random_cache_size)
cache_obj = LRU(random_cache_size)

# returns None as cache is empty
print("1 ->", cache_obj.get(1))
# returns "not found" as it is the default argument
print("1 ->", cache_obj.get(1, "not found"))

cache_obj.put(1, 2)
# prints 2
print("1 ->", cache_obj.get(1))
- processes: dictionary of processes
- algorithm: list of available priority algorithms
"""
import collections
import numpy as np
import math
from LRU import LRU
from FIFO import FIFO
from Page import Page
from Process import Process

# Global variables
# Each frame must be an independent [process, page] pair, so build the lists
# with comprehensions rather than list multiplication (which would alias every
# slot to the same inner list).
M = [[-1, -1] for _ in range(128)]  # main memory: 128 frames
S = [[-1, -1] for _ in range(256)]  # swap area: 256 frames
lru = LRU()
fifo = FIFO()
global_time = 0  # in deciseconds for arithmetic purposes
logs = []
debug = False
swaps = 0

# Instances of Process
processes = {}
SIZE_OF_PAGE = 16

# Algorithm
algorithm = [fifo, lru]
PAGE_REPLACEMENT_ALGORITHM = 0
# print("Page fault of Optimal :{}".format(Optimal(ref_str2,frameSize))) # print("Page fault of LRU:{}".format(LRU(ref_str2,frameSize))) # print("---------------------------") # page_fault_FIFO.append(FIFO(ref_str2,frameSize)) # page_fault_Opti.append(Optimal(ref_str2,frameSize)) # page_fault_LRU.append(LRU(ref_str2,frameSize)) # frame_list.append(frameSize) # frameSize +=1 #show page fault case 3 while frameSize < 7: print("frameSize : {}".format(frameSize)) print("---------------------------") print("Page fault of FIFO :{}".format(FIFO(ref_str3, frameSize))) print("Page fault of Optimal :{}".format(Optimal(ref_str3, frameSize))) print("Page fault of LRU:{}".format(LRU(ref_str3, frameSize))) print("---------------------------") page_fault_FIFO.append(FIFO(ref_str3, frameSize)) page_fault_Opti.append(Optimal(ref_str3, frameSize)) page_fault_LRU.append(LRU(ref_str3, frameSize)) frame_list.append(frameSize) frameSize += 1 # #plot graph #FIFO graph plt.plot(frame_list, page_fault_FIFO) # naming the x axis plt.xlabel('#frame') # naming the y axis plt.ylabel('#page_fault')
#!/usr/bin/env python3
from LRU import LRU

datasource = {
    1: "one",
    2: "two",
    3: "three",
    4: "four",
    5: "five",
    6: "six",
    7: "seven",
    8: "eight",
    9: "nine",
    10: "ten"
}

lru = LRU(2, datasource)
print(lru.get(1))
print(lru.get(2))
print(lru.get(3))
print(lru.get(4))
print(lru.get(6))
print(lru.get(3))
print(lru.get(6))
print(lru.get(1))
print(lru.get(2))
print(lru.get(5))
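# The script above constructs the cache with a backing datasource and only
# ever calls get(), which suggests a read-through design: a miss loads the
# value from the datasource and may evict the oldest entry. The real LRU
# module is not shown, so this sketch is an assumption about that behaviour.
from collections import OrderedDict

class ReadThroughLRU:
    def __init__(self, capacity, datasource):
        self.capacity = capacity
        self.datasource = datasource
        self.cache = OrderedDict()

    def get(self, key):
        if key in self.cache:
            self.cache.move_to_end(key)  # refresh recency on a hit
            return self.cache[key]
        value = self.datasource.get(key)  # read through on a miss
        self.cache[key] = value
        if len(self.cache) > self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used
        return value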
from resources import utils
from FIFO import FIFO
from OTM import OTM
from LRU import LRU

# Obtain the sequence of memory references.
memoryREF = utils.readSequence()

# Execute FIFO substitution.
fifoSubstitution = FIFO(memoryREF)
fifoSubstitution.runsFifo()

# Execute OTM substitution.
otmSubstitution = OTM(memoryREF)
otmSubstitution.runsOtm()

# Execute LRU substitution.
lruSubstitution = LRU(memoryREF)
lruSubstitution.runsLru()
def main(a):
    if a == 1:
        least_recently_used = LRU(3)
        least_recently_used.put("A", 0)
        least_recently_used.put("B", 1)
        least_recently_used.put("C", 2)
        least_recently_used.print_cache_dictionary()
        least_recently_used.print_cache_doubly()
        print()
        least_recently_used.get("A")
        least_recently_used.print_cache_dictionary()
        least_recently_used.print_cache_doubly()
        print()
    else:
        word_list = []
        with open("WORDS") as file:
            line = file.readline().split()
            while line:
                word_list.extend(line)
                line = file.readline().split()
        most_frequent_elements(word_list)
from LRU import LRU

lru = LRU(300)
lru.dlinked.printAll()
def create_initial_dict(isLru=False):
    if isLru:
        return LRU(dictionary_size - 256)
    return {}
def __init__(self):
    self.lru = LRU()