def __init__(self, N):
    self.N = N
    self.Cache = Disk(N)
    self.Hist = Disk(N)
    ## Config variables
    self.decayRate = 1
    self.epsilon = 0.95
    self.lamb = 0.05
    self.randomize_rate = 0.5
    ##
    self.accessedTime = {}
    self.frequency = {}
    self.evictionTime = {}
    self.policyUsed = {}
    self.weightsUsed = {}
    ## Accounting variables
    self.time = 0
    self.W = np.array([.5, .5], dtype=np.float32)
    self.X = np.array([])
    self.Y1 = np.array([])
    self.Y2 = np.array([])
def __init__(self, N):
    self.N = N
    self.Cache = Disk(N)
    self.Hist = Disk(N)
    ## Config variables
    self.decayRate = 1
    self.epsilon = 0.95
    self.lamb = 0.05
    self.learning_phase = N
    self.error_discount_rate = (0.005)**(1.0 / N)
    ##
    self.learning = True
    self.policy = 0
    self.accessedTime = {}
    self.frequency = {}
    self.evictionTime = {}
    self.policyUsed = {}
    self.weightsUsed = {}
    ## Accounting variables
    self.time = 0
    self.W = np.array([.5, .5], dtype=np.float32)
    self.X = np.array([])
    self.Y1 = np.array([])
    self.Y2 = np.array([])
def __init__(self, N):
    self.N = N
    self.Cache = Disk(N, name='Cache')
    self.Hist1 = Disk(N, name='Hist1')
    self.Hist2 = Disk(N, name='Hist2')
    self.Hist3 = Disk(N, name='Hist3')
    ## Config variables
    self.decayRate = 0.99
    self.epsilon = 0.90  ## Learning rate
    self.lamb = 0.05
    self.learning_phase = 2 * N
    self.error_discount_rate = (0.005)**(1.0 / N)
    ## State Variables
    self.learning = True
    self.policy = 0
    self.accessedTime = {}
    self.frequency = {}
    self.accessedSinceInCache = {}
    self.evictionTime = {}
    self.policyUsed = {}
    self.weightsUsed = {}
    self.currentPolicy = np.random.randint(0, 3)
    self.time = 0
    self.learning = True
    self.leaningPhaseCount = 1
    self.W = np.array([1.0 / 3, 1.0 / 3, 1.0 / 3], dtype=np.float32)
    self.P = 0
    self.currentQ = np.zeros(3)
    self.X = np.array([])
    self.Y1 = np.array([])
    self.Y2 = np.array([])
    self.Y3 = np.array([])
class LFU_DECAY(page_replacement_algorithm):
    def __init__(self, N, decay=0.99):
        self.T = Disk(N)
        self.N = N
        self.frequency = {}
        self.decayRate = decay

    def get_N(self):
        return self.N

    def getMinValueFromCache(self, values):
        minpage, first = -1, True
        for q in self.T:
            if first or values[q] < values[minpage]:
                minpage, first = q, False
        return minpage

    def request(self, page):
        page_fault = False
        if page in self.T:
            page_fault = False
        else:
            # if len(self.T) == self.N :
            if self.T.size() == self.N:
                ## Remove the page with the smallest decayed frequency
                lfu = self.getMinValueFromCache(self.frequency)
                self.T.delete(lfu)
                del self.frequency[lfu]
            # Add page to the MRU position
            self.frequency[page] = 0
            self.T.add(page)
            page_fault = True
        ## Decay every resident page, then credit the requested page
        for q in self.T:
            self.frequency[q] *= self.decayRate
        self.frequency[page] += 1
        return page_fault

    def get_data(self):
        # data = []
        # for i,p,m in enumerate(self.T):
        #     data.append((p,m,i,0))
        # return data
        return [list(self.frequency)]  ## NOTE: was `self.freq`, which this class never defines

    def get_list_labels(self):
        return ['L']
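## A minimal, self-contained sketch of the decayed-frequency idea used by LFU_DECAY,
## written against plain Python containers instead of the repo's Disk class.
## Everything below (function name, the 0.99 decay, the example trace) is illustrative,
## not part of the repo.
def lfu_decay_sketch(trace, cache_size, decay=0.99):
    cache, freq, faults = set(), {}, 0
    for page in trace:
        if page not in cache:
            if len(cache) == cache_size:
                victim = min(cache, key=lambda p: freq[p])  # lowest decayed count
                cache.discard(victim)
                del freq[victim]
            cache.add(page)
            freq[page] = 0
            faults += 1
        for p in cache:            # decay every resident page, then credit the hit
            freq[p] *= decay
        freq[page] += 1
    return faults

## Example: lfu_decay_sketch([1, 2, 3, 1, 1, 4, 2], cache_size=2)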
def __init__(self, N):
    self.T = Disk(N)
    self.H = Disk(N)
    self.N = N
    self.marked = set()
    self.G = {}  ## local access graph
    self.is_first_request = True
    self.last_request = -1
    self.page_probability = {}
    self.fast_mode = True
    self.use_weights = False
    self.weights = {}
def __init__(self, N):
    self.T = Disk(N)
    self.H = Disk(N)
    self.N = N
    self.marked = set()
    self.G = Graph()  ## local access graph
    self.is_first_request = True
    self.last_request = -1
    self.hitting_time = {}
    self.fast_mode = True
    self.use_weights = False
    self.weights = {}
def __init__(self, N):
    self.N = N
    self.CacheRecency = Disk(N)
    self.CacheFrequecy = priorityqueue(N)
    self.Hist1 = Disk(N)
    self.Hist2 = priorityqueue(N)
    ## Config variables
    self.decayRate = 1
    self.epsilon = 0.90
    self.lamb = 0.05
    self.learning_phase = N / 2
    self.error_discount_rate = (0.005)**(1.0 / N)
    # self.error_discount_rate = 1
    ##
    self.learning = True
    self.policy = 0
    self.evictionTime = {}
    self.policyUsed = {}
    self.weightsUsed = {}
    self.freq = {}
    ## TODO add decay_time and decay_factor
    self.decay_time = N
    self.decay_factor = 1
    ## Accounting variables
    self.time = 0
    self.W = np.zeros((10, 2))
    self.W[:, :] = 0.5
    self.X = np.array([], dtype=np.int32)
    self.Y1 = np.array([])
    self.Y2 = np.array([])
    ###
    self.q = Queue.Queue()
    self.sum = 0
    self.NewPages = []
def __init__(self, N):
    self.T = []
    self.N = N
    self.disk = Disk(N)
    self.freq = {}
    ## Training variables
    self.X, self.Y = [], []
    self.reward = []
    self.regret = []
    ## Config variables
    self.batchsize = N
    self.numbatch = 5
    ## Aux variables
    self.hist = queue.deque()
    self.Xbuff = queue.deque()
    self.Ybuff = queue.deque()
    self.pageHitBuff = deque()
    self.current = 0
    self.action = 1
    self.currentPageHits = 0
def __init__(self, param):
    assert 'cache_size' in param
    self.N = param['cache_size']
    self.T1 = Disk(self.N)
    self.T2 = Disk(self.N)
    self.B1 = Disk(self.N)
    self.B2 = Disk(2 * self.N)
    self.P = 0
    self.time = 0
    self.X = []
    self.Y = []
    self.unique = {}
    self.unique_cnt = 0
    self.pollution_dat_x = []
    self.pollution_dat_y = []
def __init__(self, N):
    self.T = []
    self.N = N
    self.lru = Disk(N)
    self.freq = {}
    self.request_time = {}
    self.marked = set()
    self.lru_regret = 0
    self.lfu_regret = 0
    self.expert_cost = 0
    self.lru_cost = 0
    self.lfu_cost = 0
    self.LRU = LRU(N)
    self.LFU = LFU_DECAY(N)
    self.current_time = 0
class LaCReME(page_replacement_algorithm):
    def __init__(self, N):
        self.N = N
        self.CacheRecency = Disk(N)
        self.CacheFrequecy = priorityqueue(N)
        self.Hist1 = Disk(N)
        self.Hist2 = priorityqueue(N)
        ## Config variables
        self.decayRate = 1
        self.epsilon = 0.05
        self.lamb = 0.05
        self.learning_phase = N / 2
        # self.error_discount_rate = (0.005)**(1.0/N) ## TODO ADD BACK
        self.error_discount_rate = 1
        ##
        self.learning = True
        self.policy = 0
        self.evictionTime = {}
        self.policyUsed = {}
        self.weightsUsed = {}
        self.freq = {}
        ## TODO add decay_time and decay_factor
        self.decay_time = N
        self.decay_factor = 1
        ## Accounting variables
        self.time = 0
        self.W = np.array([.5, .5], dtype=np.float32)
        self.X = np.array([], dtype=np.int32)
        self.Y1 = np.array([])
        self.Y2 = np.array([])

    def get_N(self):
        return self.N

    def visualize(self, plt):
        # print(np.min(self.X), np.max(self.X))
        ax = plt.subplot(2, 1, 1)
        ax.set_xlim(np.min(self.X), np.max(self.X))
        l1, = plt.plot(self.X, self.Y1, 'b-', label='W_lru')
        l2, = plt.plot(self.X, self.Y2, 'r-', label='W_lfu')
        return [l1, l2]

    ##########################################
    ## Add a page to cache using policy 'poly'
    ##########################################
    def addToCache(self, page, pagefreq=0):
        self.CacheRecency.add(page)
        self.CacheFrequecy.add(page)
        self.CacheRecency.increaseCount(page, amount=pagefreq)
        self.CacheFrequecy.increase(page, amount=pagefreq)

    ######################
    ## Get LRU or LFU page
    ######################
    def selectEvictPage(self, policy):
        r = self.CacheRecency.getIthPage(0)
        f = self.CacheFrequecy.peaktop()
        pageToEvit, policyUsed = None, None
        if r == f:
            pageToEvit, policyUsed = r, -1
        elif policy == 0:
            pageToEvit, policyUsed = r, 0
        elif policy == 1:
            pageToEvit, policyUsed = f, 1
        return pageToEvit, policyUsed

    def evictPage(self, pg):
        self.CacheRecency.delete(pg)
        self.CacheFrequecy.delete(pg)

    ##############################################################
    ## There was a page hit to 'page'. Update the data structures
    ##############################################################
    def pageHitUpdate(self, page):
        if page in self.CacheRecency and page in self.CacheFrequecy:
            self.CacheRecency.moveBack(page)
            self.CacheRecency.increaseCount(page)
            self.CacheFrequecy.increase(page)

    #########################
    ## Get the Q distribution
    #########################
    def getQ(self):
        return (1 - self.lamb) * self.W + self.lamb * np.ones(2) / 2

    ############################################
    ## Choose a page based on the q distribution
    ############################################
    def chooseRandom(self):
        q = self.getQ()
        r = np.random.rand()
        for i, p in enumerate(q):
            if r < p:
                return i
        return len(q) - 1

    def updateWeight(self, cost):
        self.W = self.W * (1 - self.epsilon * cost)
        self.W = self.W / np.sum(self.W)

    ########################################################################################################################################
    ####REQUEST#############################################################################################################################
    ########################################################################################################################################
    def request(self, page):
        page_fault = False
        self.time = self.time + 1
        # if self.time % self.learning_phase == 0 :
        #     self.learning = not self.learning

        #####################
        ## Visualization data
        #####################
        prob = self.getQ()
        self.X = np.append(self.X, self.time)
        self.Y1 = np.append(self.Y1, prob[0])
        self.Y2 = np.append(self.Y2, prob[1])

        if self.time % self.N == 0:
            self.CacheFrequecy.decay(self.decay_factor)
            self.Hist2.decay(self.decay_factor)

        ##########################
        ## Process page request
        ##########################
        if page in self.CacheFrequecy:
            page_fault = False
            self.pageHitUpdate(page)
        else:
            #####################################################
            ## Learning step: If there is a page fault in history
            #####################################################
            pageevict, histpage_freq = None, 1
            policyUsed = -1
            if page in self.Hist1:
                pageevict = page
                histpage_freq = self.Hist1.getCount(page)
                self.Hist1.delete(page)
                policyUsed = 0
                self.W[0] = self.W[0] * (1 - self.epsilon) if self.policyUsed[page] != -1 else self.W[0]
            elif page in self.Hist2:
                pageevict = page
                histpage_freq = self.Hist2.getCount(page)  ## Get the page frequency in history
                self.Hist2.delete(page)
                policyUsed = 1
                self.W[1] = self.W[1] * (1 - self.epsilon) if self.policyUsed[page] != -1 else self.W[1]
            self.W = self.W / np.sum(self.W)

            # if pageevict is not None :
            #     q = self.weightsUsed[pageevict]
            #     # err = self.error_discount_rate ** (self.time - self.evictionTime[pageevict])
            #     err = 1
            #     reward = np.array([0,0], dtype=np.float32)
            #     if policyUsed == 0 : # LRU
            #         reward[0] = err
            #     if policyUsed == 1:
            #         reward[1] = err
            #     reward_hat = reward
            #     #################
            #     ## Update Weights
            #     #################
            #     if self.policyUsed[pageevict] != -1 :
            #         # self.W = self.W * np.exp(self.lamb * reward_hat / 2)
            #         self.W = self.W * (1 - reward*self.epsilon)
            #         self.W = self.W / np.sum(self.W)

            ####################
            ## Remove from Cache
            ####################
            if self.CacheRecency.size() == self.N:
                ################
                ## Choose Policy
                ################
                # if not self.learning :
                #     act = np.argmax(self.getQ())
                # else :
                #     act = self.chooseRandom()
                act = np.argmax(self.W)  ## REMOVE
                cacheevict, poly = self.selectEvictPage(act)
                pagefreq = self.CacheFrequecy.getCount(cacheevict)
                self.policyUsed[cacheevict] = poly
                self.weightsUsed[cacheevict] = self.getQ()
                self.evictionTime[cacheevict] = self.time

                ## TODO ADD BACK
                # if not self.learning :
                #     self.policyUsed[cacheevict] = -1

                ###################
                ## Evict to history
                ###################
                histevict = None
                if (poly == 0) or (poly == -1 and np.random.rand() < 0.5):
                    if self.Hist1.size() == self.N:
                        histevict = self.Hist1.getIthPage(0)
                        self.Hist1.delete(histevict)
                    self.Hist1.add(cacheevict)
                    self.Hist1.setCount(cacheevict, pagefreq)
                else:
                    if self.Hist2.size() == self.N:
                        histevict = self.Hist2.popmin()
                    self.Hist2.add(cacheevict)
                    self.Hist2.increase(cacheevict, pagefreq - 1)

                if histevict is not None:
                    del self.evictionTime[histevict]
                    del self.policyUsed[histevict]
                    del self.weightsUsed[histevict]

                self.evictPage(cacheevict)

            self.addToCache(page, pagefreq=histpage_freq)
            page_fault = True

        return page_fault

    def get_list_labels(self):
        return ['L']
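import numpy as np

## LaCReME's learning step, isolated as a standalone sketch: when a missed page turns up
## in the history of one expert (index 0 for LRU, 1 for LFU), that expert's weight is
## multiplied by (1 - epsilon) and the weights are renormalized. In the class this runs
## inside request(); the values here are illustrative.
def penalize_expert(W, i, epsilon=0.05):
    W = W.copy()
    W[i] *= (1 - epsilon)
    return W / W.sum()

W = np.array([0.5, 0.5], dtype=np.float32)
W = penalize_expert(W, 0)   # the miss resurfaced in the LRU history
print(W)                    # weight shifts toward the LFU expert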
class PAGERANK_MARKING_FAST(page_replacement_algorithm):
    def __init__(self, N):
        self.T = Disk(N)
        self.H = Disk(N)
        self.N = N
        self.marked = set()
        self.G = {}  ## local access graph
        self.last_request = -1
        self.first_request = True  ## NOTE: was False; the guard in request() is meant to skip the very first request
        self.PR = {}

    def get_N(self):
        return self.N

    def request(self, page):
        # print('request: ', page)
        page_fault = False

        if not self.first_request:
            self.__add_edge(self.last_request, page)
        self.last_request = page
        self.first_request = False

        if page in self.T:
            ## Mark page
            self.marked.add(page)
        else:
            if page in self.H:
                self.H.delete(page)

            # Start a new phase when all pages are marked and a page fault occurs
            # Unmark all the pages
            if len(self.marked) == self.N:
                self.marked.clear()
                self.PR = self.compute_pagerank(page)

            if self.T.size() == self.N:
                ## Get the set of unmarked pages
                U = set(self.T.get_data()) - self.marked

                # Compute the page rank of all pages
                # self.PR = self.compute_pagerank(page)

                ## Choose a page with minimum pagerank
                least_pagerank_page = -1
                for u in U:
                    if least_pagerank_page == -1 or self.PR[u] < self.PR[least_pagerank_page]:
                        least_pagerank_page = u

                ## Delete page from cache
                self.T.delete(least_pagerank_page)

                ## Remove the least recent page from history
                if self.H.size() == self.N:
                    u = self.H.deleteFront()
                    if u is not None and u in self.G:  ## NOTE: was `page in self.G`; the evicted history page is removed from the graph
                        # print('G.pop (',u,')')
                        self.G.pop(u, None)

                ## Move discarded page to history
                self.H.add(least_pagerank_page)

            ## Mark page and add to T
            self.marked.add(page)
            self.T.add(page)

            ## Page fault is True
            page_fault = True

        return page_fault

    def __add_edge(self, u, v):
        if u not in self.G:
            self.G[u] = set()
        if v not in self.G:
            self.G[v] = set()
        self.G[u] = self.G[u] | {v}
        self.G[v] = self.G[v] | {u}

    def __get_adj_matrix(self):
        ## Mapping
        node_id = {}
        node_name = {}
        for i, node in enumerate(self.G):
            node_id[node] = i
            node_name[i] = node
        A = np.zeros((len(node_id), len(node_id)))
        for u in self.G:
            adj = list(self.G[u])
            for v in adj:
                if v in self.G:
                    u_id = node_id[u]
                    v_id = node_id[v]
                    A[u_id, v_id] = 1
                    A[v_id, u_id] = 1
                else:
                    self.G[u] = self.G[u] - {v}
        return A, node_id, node_name

    def __mult_matrix(self, A, n):
        ## Matrix exponentiation by squaring
        B = np.eye(len(A))
        while n > 0:
            if n % 2 == 1:
                B = np.matmul(B, A)
            A = np.matmul(A, A)
            n = n // 2  ## NOTE: was `n = n / 2`; the exponent must stay an integer
        return B

    def compute_pagerank(self, init_page):
        A, node_id, node_name = self.__get_adj_matrix()
        u = node_id[init_page]
        n = len(A)
        ## Transportation vector
        E = np.zeros(n)
        E[u] = 1
        # ranks_per_page = pr.compute(A,teleport_vector=tv)
        pr = Pagerank()
        R = pr.compute_local(A, E)
        PR = {}
        for v, pr in enumerate(R):
            PR[node_name[v]] = pr
        return PR

    def page_label(self, page):
        lab = "%s(%.1f)" % (page, self.PR[page] if page in self.PR else 0)
        return lab

    def page_color(self, page):
        if page in self.marked:
            return 1  ## Red
        else:
            return 0  ## White

    def debug(self):
        X = []
        for u in self.get_data():
            X.append((self.P[u], u))

    def get_data(self):
        # data = []
        # for i,p,m in enumerate(self.T):
        #     data.append((p,m,i,0))
        # return data
        return [self.T.get_data(), self.H.get_data()]
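import numpy as np

## The class above delegates the ranking to an external Pagerank().compute_local(A, E).
## As a rough, standalone stand-in (not the repo's implementation), this is one common way
## to compute a personalized PageRank vector by power iteration; the damping factor and
## tolerance are illustrative assumptions.
def personalized_pagerank(A, E, damping=0.85, tol=1e-8, max_iter=1000):
    n = len(A)
    deg = A.sum(axis=1)
    deg[deg == 0] = 1.0                  # avoid division by zero for isolated nodes
    P = A / deg[:, None]                 # row-stochastic transition matrix
    E = E / E.sum()                      # teleport (restart) distribution
    r = np.ones(n) / n
    for _ in range(max_iter):
        r_next = damping * (P.T @ r) + (1 - damping) * E
        if np.abs(r_next - r).sum() < tol:
            break
        r = r_next
    return r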
class ARCOPT(page_replacement_algorithm):
    def __init__(self, N, traces):
        self.T = []
        self.N = N
        self.T1 = Disk(N)
        self.T2 = Disk(N)
        self.B1 = Disk(N)
        self.B2 = Disk(2 * N)
        self.P = 0
        self.page_request_time = {}
        ## Precompute, for every page, the queue of positions at which it is requested
        for i, p in enumerate(traces):
            if p not in self.page_request_time:
                self.page_request_time[p] = Queue.Queue()
            self.page_request_time[p].put(i)

    def get_N(self):
        return self.N

    def request(self, page):
        ## Consume the current request time so the front of the queue is this page's *next* request
        x = self.page_request_time[page].get()
        # print self.T1.size(), self.T2.size()
        page_fault = False
        # if inList(self.T, page):
        if self.T1.inDisk(page) or self.T2.inDisk(page):
            # self.T = moveToMRU(self.T,page)
            if page in self.T1:
                self.T1.delete(page)
            if page in self.T2:
                self.T2.delete(page)
            if not self.T2.add(page):
                print('failed adding at Case 1')
        elif self.B1.inDisk(page):
            self.__replace(page)
            self.B1.delete(page)
            if not self.T2.add(page):
                print('failed adding at B1')
            page_fault = True
        elif self.B2.inDisk(page):
            self.__replace(page)
            self.B2.delete(page)
            if not self.T2.add(page):
                print('failed adding at B2')
            page_fault = True
        else:
            t1 = self.T1.size()
            t2 = self.T2.size()
            b1 = self.B1.size()
            b2 = self.B2.size()
            if t1 + b1 == self.N:
                if t1 < self.N:
                    self.B1.deleteFront()
                    self.__replace(page)
                else:
                    self.T1.deleteFront()
            elif t1 + b1 < self.N:
                if t1 + t2 + b1 + b2 >= self.N:
                    if t1 + t2 + b1 + b2 == 2 * self.N:
                        self.B2.deleteFront()
                    self.__replace(page)
            # Add page to the MRU position in T1
            # self.T.append(page)
            if not self.T1.add(page):
                print('failed adding at case 4')
            page_fault = True
        return page_fault

    def __replace(self, x):
        if self.T1.size() == 0:
            y = self.T2.deleteFront()
            if y is not None:
                self.B2.add(y)
        elif self.T2.size() == 0:
            y = self.T1.deleteFront()
            if y is not None:
                self.B1.add(y)
        else:
            ## Pick which list to evict from based on the next request time of each list's LRU page
            t1_page = self.T1.getIthPage(0)
            t2_page = self.T2.getIthPage(0)
            if not self.page_request_time[t1_page].empty():
                page1_time = self.page_request_time[t1_page].queue[0]
            else:
                page1_time = int(1e15)
            if not self.page_request_time[t2_page].empty():
                page2_time = self.page_request_time[t2_page].queue[0]
            else:
                page2_time = int(1e15)
            if page1_time > page2_time:
                y = self.T2.deleteFront()
                if y is not None:
                    self.B2.add(y)
            else:
                y = self.T1.deleteFront()
                if y is not None:
                    self.B1.add(y)

    def get_data(self):
        return [
            self.T1.get_data(), self.T2.get_data(),
            self.B1.get_data(), self.B2.get_data()
        ]

    def get_list_labels(self):
        return ['T1', 'T2', 'B1', 'B2']
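import collections

## ARCOPT needs, for every page, the queue of positions at which it will be requested;
## the constructor above builds this with Queue.Queue. The same precomputation as a
## lighter-weight standalone sketch using collections.deque (illustrative, not the repo's code):
def build_request_times(trace):
    times = collections.defaultdict(collections.deque)
    for i, page in enumerate(trace):
        times[page].append(i)
    return times

## times = build_request_times([3, 1, 3, 2]); times[3][0] is 0, and after popleft() it is 2.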
class TWO_LIST_MARKING:
    def __init__(self, N):
        self.M1 = Disk(N)
        self.M2 = Disk(N)
        self.U1 = Disk(N)
        self.U2 = Disk(N)
        self.B1 = Disk(N)
        self.B2 = Disk(N)
        self.P = 0
        self.N = N

    def get_N(self):
        return self.N

    def request(self, page):
        pageFault = False
        if self.M1.inDisk(page) or self.M2.inDisk(page) or self.U1.inDisk(page) or self.U2.inDisk(page):
            ## Remove from the list
            self.M1.delete(page)
            self.M2.delete(page)
            self.U1.delete(page)
            self.U2.delete(page)
            ## Move to M2
            self.M2.add(page)
        else:
            pageFault = True
            ## Start a new phase when all pages are marked and a page fault occurs
            if self.M1.size() + self.M2.size() == self.N:
                m1_data = self.M1.getData()
                m2_data = self.M2.getData()
                for x in m1_data:
                    self.M1.delete(x)
                    self.U1.add(x)
                for x in m2_data:
                    self.M2.delete(x)
                    self.U2.add(x)

            ## If page is in history then update P
            ## u = u1 + u2
            ## 0 <= p <= u / u1
            ## p(u1) = p / u
            ## p(u2) = (u - p*u1)/(u*u2)
            u1 = self.U1.size()
            u2 = self.U2.size()
            u = u1 + u2
            if self.B1.inDisk(page):
                if u1 > 0:
                    self.P += 1.0 * u2 / u1
                else:
                    self.P += 0.5
                if u1 > 0 and self.P > (u / u1):
                    self.P = (u / u1)
                self.B1.delete(page)
            elif self.B2.inDisk(page):
                if u2 > 0:
                    self.P -= 1.0 * u1 / u2
                else:
                    self.P -= 0.5
                if self.P < 0:
                    self.P = 0
                self.B2.delete(page)

            if self.M1.size() + self.M2.size() + self.U1.size() + self.U2.size() == self.N:
                # Evict a page
                U1 = self.U1.getData()
                U2 = self.U2.getData()
                if u1 == 0:
                    p1 = 0
                    p2 = 1.0 / u
                elif u2 == 0:
                    p1 = 1.0 / u
                    p2 = 0
                else:
                    p1 = self.P / u                    # Probability of choosing a page in U1
                    p2 = (u - self.P * u1) / (u * u2)  # Probability of choosing a page in U2

                ## Calculate probability distribution (prefix sums)
                P = [0 for i in range(0, self.N)]
                for i, u_page in enumerate(U1):
                    P[i] = p1
                    if i > 0:
                        P[i] += P[i - 1]
                for i, u_page in enumerate(U2):
                    P[i + u1] = p2
                    if i + u1 > 0:
                        P[i + u1] += P[i + u1 - 1]

                ## Choose a page at random
                ran = random.random()
                U = U1 + U2
                for i, u_page in enumerate(U):
                    if ran < P[i]:
                        self.U1.delete(u_page)
                        self.U2.delete(u_page)
                        evicted = u_page
                        if i < u1:
                            inU1 = True
                        else:
                            inU1 = False
                        break

                if inU1:
                    if self.B1.size() == self.N:
                        self.B1.deleteFront()
                    self.B1.add(evicted)
                else:
                    if self.B2.size() == self.N:
                        self.B2.deleteFront()
                    self.B2.add(evicted)

            ## Add new page to M1
            self.M1.add(page)
        return pageFault

    def getData(self):
        m1 = []
        m2 = []
        u1 = []
        u2 = []
        b1 = []
        b2 = []
        for m in self.M1.getData():
            m1.append((m, 1))
        for m in self.M2.getData():
            m2.append((m, 3))
        for u in self.U1.getData():
            u1.append((u, 0))
        for u in self.U2.getData():
            u2.append((u, 2))
        for m in self.B1.getData():
            b1.append(m)
        for m in self.B2.getData():
            b2.append(m)
        return [u1 + m1 + u2 + m2, b1, b2]
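import random

## TWO_LIST_MARKING picks the eviction victim by building a prefix-summed probability list
## and walking it with one uniform draw (inverse-CDF sampling). The same idea as a
## standalone helper; the function name and example weights are illustrative.
def sample_by_weight(items, probs):
    cumulative, r = 0.0, random.random()
    for item, p in zip(items, probs):
        cumulative += p
        if r < cumulative:
            return item
    return items[-1]            # guard against floating-point round-off

## sample_by_weight(['a', 'b', 'c'], [0.2, 0.3, 0.5])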
class LaCReME_T1T2(page_replacement_algorithm):
    def __init__(self, N):
        self.N = N
        self.T1 = Disk(N)
        self.T2 = Disk(N)
        self.Hist1 = Disk(N)
        self.Hist2 = Disk(N)
        ## Config variables
        self.epsilon = 0.90
        self.lamb = 0.05
        self.error_discount_rate = (0.005)**(1.0 / N)
        # self.learning_phase = N/2
        # self.error_discount_rate = 1
        ##
        self.policy = 0
        self.evictionTime = {}
        self.policyUsed = {}
        self.weightsUsed = {}
        self.freq = {}
        ## TODO add decay_time and decay_factor
        self.decay_time = N
        self.decay_factor = 1
        ## Accounting variables
        self.time = 0
        self.W = np.array([.5, .5], dtype=np.float32)
        self.X = np.array([], dtype=np.int32)
        self.Y1 = np.array([])
        self.Y2 = np.array([])
        ###
        self.q = Queue.Queue()
        self.sum = 0
        self.NewPages = []

    def get_N(self):
        return self.N

    def visualize(self, plt):
        # print(np.min(self.X), np.max(self.X))
        ax = plt.subplot(2, 1, 1)
        ax.set_xlim(np.min(self.X), np.max(self.X))
        l1, = plt.plot(self.X, self.Y1, 'b-', label='W_lru')
        l2, = plt.plot(self.X, self.Y2, 'r-', label='W_lfu')
        l3, = plt.plot(self.X, self.NewPages, 'g-', label='New Pages', alpha=0.6)
        return [l1, l2, l3]

    ##############################################################
    ## There was a page hit to 'page'. Update the data structures
    ##############################################################
    def pageHitUpdate(self, page):
        if page in self.T1:
            self.T1.delete(page)
            self.T2.add(page)
        else:
            self.T2.moveBack(page)

    #########################
    ## Get the Q distribution
    #########################
    def getQ(self):
        return (1 - self.lamb) * self.W + self.lamb * np.ones(2) / 2

    ############################################
    ## Choose a page based on the q distribution
    ############################################
    def chooseRandom(self):
        q = self.getQ()
        r = np.random.rand()
        for i, p in enumerate(q):
            if r < p:
                return i
        return len(q) - 1

    def updateWeight(self, cost):
        self.W = self.W * (1 - self.epsilon * cost)
        self.W = self.W / np.sum(self.W)

    ########################################################################################################################################
    ####REQUEST#############################################################################################################################
    ########################################################################################################################################
    def request(self, page):
        page_fault = False
        self.time = self.time + 1
        # if self.time % self.learning_phase == 0 :
        #     self.learning = not self.learning

        #####################
        ## Visualization data
        #####################
        prob = self.getQ()
        self.X = np.append(self.X, self.time)
        self.Y1 = np.append(self.Y1, prob[0])
        self.Y2 = np.append(self.Y2, prob[1])

        notInHistory = 0

        ##########################
        ## Process page request
        ##########################
        t1 = self.T1.size()
        t2 = self.T2.size()
        assert t1 + t2 <= self.N

        if page in self.T1 or page in self.T2:
            page_fault = False
            self.pageHitUpdate(page)
        else:
            #####################################################
            ## Learning step: If there is a page fault in history
            #####################################################
            pageevict = None
            inHist = False
            policyUsed = -1
            if page in self.Hist1:
                pageevict = page
                self.Hist1.delete(page)
                policyUsed = 0
                inHist = True
            elif page in self.Hist2:
                pageevict = page
                self.Hist2.delete(page)
                policyUsed = 1
                inHist = True
            else:
                notInHistory = 1

            if pageevict is not None:
                q = self.weightsUsed[pageevict]
                # err = self.error_discount_rate ** (self.time - self.evictionTime[pageevict])
                err = 1
                reward = np.array([0, 0], dtype=np.float32)
                if policyUsed == 0:  # LRU
                    reward[1] = err
                if policyUsed == 1:
                    reward[0] = err
                reward_hat = reward / q

                #################
                ## Update Weights
                #################
                if self.policyUsed[pageevict] != -1:
                    self.W = self.W * np.exp(self.lamb * reward_hat / 2)
                    self.W = self.W / np.sum(self.W)

            ####################
            ## Remove from Cache
            ####################
            if t1 + t2 == self.N:
                ################
                ## Choose Policy
                ################
                act = self.chooseRandom()
                if t1 == self.N or (act == 0 and t1 > 0):
                    cacheevict = self.T1.popFront()
                else:
                    cacheevict = self.T2.popFront()

                self.policyUsed[cacheevict] = act
                self.weightsUsed[cacheevict] = self.getQ()
                self.evictionTime[cacheevict] = self.time

                ###################
                ## Evict to history
                ###################
                histevict = None
                if act == 0:
                    if self.Hist1.size() == self.N:
                        histevict = self.Hist1.getFront()
                        self.Hist1.delete(histevict)
                    self.Hist1.add(cacheevict)
                else:
                    if self.Hist2.size() == self.N:
                        histevict = self.Hist2.getFront()
                        self.Hist2.delete(histevict)
                    self.Hist2.add(cacheevict)

                if histevict is not None:
                    del self.evictionTime[histevict]
                    del self.policyUsed[histevict]
                    del self.weightsUsed[histevict]

            if inHist:
                self.T2.add(page)
            else:
                self.T1.add(page)
            page_fault = True

        self.q.put(notInHistory)
        self.sum += notInHistory
        if self.q.qsize() > self.N:
            self.sum -= self.q.get()
        self.NewPages.append(1.0 * self.sum / self.N)

        return page_fault

    def get_list_labels(self):
        return ['L']
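import numpy as np

## The update inside request() above is an EXP3-style step: the observed reward is
## importance-weighted by the sampling distribution q and fed into an exponential weight
## update. Isolated here as a standalone sketch with illustrative values.
def exp3_update(W, q, rewarded_arm, lamb=0.05, err=1.0):
    reward = np.zeros_like(W)
    reward[rewarded_arm] = err
    reward_hat = reward / q                 # importance weighting, as in the class
    W = W * np.exp(lamb * reward_hat / 2)
    return W / W.sum()

W = np.array([0.5, 0.5], dtype=np.float32)
q = (1 - 0.05) * W + 0.05 * np.ones(2) / 2
print(exp3_update(W, q, rewarded_arm=1))    # the page resurfaced in the LRU history, so the other arm is rewarded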
class BANDIT_DOUBLE_HIST(page_replacement_algorithm):
    def __init__(self, N):
        self.N = N
        self.Cache = Disk(N)
        self.Hist1 = Disk(N)
        self.Hist2 = Disk(N)
        ## Config variables
        self.decayRate = 0.99
        self.epsilon = 0.95
        self.lamb = 0.05
        self.learning_phase = N / 2
        self.error_discount_rate = (0.005)**(1.0 / N)
        ##
        self.learning = True
        self.policy = 0
        self.accessedTime = {}
        self.frequency = {}
        self.evictionTime = {}
        self.policyUsed = {}
        self.weightsUsed = {}
        ## Accounting variables
        self.time = 0
        self.W = np.array([.5, .5], dtype=np.float32)
        self.X = np.array([])
        self.Y1 = np.array([])
        self.Y2 = np.array([])

    def get_N(self):
        return self.N

    def visualize(self, plt):
        print('visualize')
        l1, = plt.plot(self.X, self.Y1, 'b-', label='W_lru')
        l2, = plt.plot(self.X, self.Y2, 'r-', label='W_lfu')
        plt.xlabel('time')
        plt.ylabel('Weight')
        plt.legend(handles=[l1, l2])
        # plt.show()
        # print('W = ', self.W)

    def __keyWithMinVal(self, d):
        v = list(d.values())
        k = list(d.keys())
        return k[v.index(min(v))]

    def getMinValueFromCache(self, values):
        minpage, first = -1, True
        for q in self.Cache:
            if first or values[q] < values[minpage]:
                minpage, first = q, False
        return minpage

    def selectEvictPage(self, policy):
        r = self.getMinValueFromCache(self.accessedTime)
        f = self.getMinValueFromCache(self.frequency)
        # if r == f :
        #     return r,-1
        if policy == 0:
            return r, 0
        return f, 1

    def countUniquePagesSince(self, t):
        cnt = 0
        for p in self.Cache:
            if self.accessedTime[p] > t:
                cnt += 1
        for p in self.Hist1:
            if self.accessedTime[p] > t:
                cnt += 1
        for p in self.Hist2:
            if self.accessedTime[p] > t:
                cnt += 1
        return cnt

    def getQ(self):
        return (1 - self.lamb) * self.W + self.lamb * np.ones(2) / 2
        # return self.W

    def chooseRandom(self):
        q = self.getQ()
        r = np.random.rand()
        # if self.time < 10000 + 1751 and self.time > 1751:
        #     print('r = ', r, 'q = ', q)
        for i, p in enumerate(q):
            if r < p:
                return i
        return len(q) - 1

    def updateWeight(self, cost):
        self.W = self.W * (1 - self.epsilon * cost)
        self.W = self.W / np.sum(self.W)

    ########################################################################################################################################
    ####REQUEST#############################################################################################################################
    ########################################################################################################################################
    def request(self, page):
        page_fault = False
        self.time = self.time + 1

        ############################
        ## Save data for training ##
        ############################
        if self.time % self.learning_phase == 0:
            self.learning = not self.learning

        ## Visualization data
        prob = self.getQ()
        self.X = np.append(self.X, self.time)
        self.Y1 = np.append(self.Y1, prob[0])
        self.Y2 = np.append(self.Y2, prob[1])

        ##########################
        ## Process page request ##
        ##########################
        if page in self.Cache:
            page_fault = False
        else:
            pageevict = None
            policyUsed = -1
            if page in self.Hist1:
                pageevict = page
                self.Hist1.delete(page)
                policyUsed = 0
            elif page in self.Hist2:
                pageevict = page
                self.Hist2.delete(page)
                policyUsed = 1

            if pageevict is not None:
                q = self.weightsUsed[pageevict]
                err = self.error_discount_rate**(self.time - self.evictionTime[pageevict])
                reward = np.array([0, 0], dtype=np.float32)
                if policyUsed == 0:
                    reward[1] = err
                if policyUsed == 1:
                    reward[0] = err
                reward_hat = reward / q
                # print('self.policyUsed[%d] = %d' % (pageevict,self.policyUsed[pageevict] ))

                ## Update Weights
                if self.policyUsed[pageevict] != -1:
                    # print('Updating weights')
                    self.W = self.W * np.exp(self.lamb * reward_hat / 2)
                    self.W = self.W / np.sum(self.W)

            ## Remove from Cache
            if self.Cache.size() == self.N:
                if not self.learning:
                    act = np.argmax(self.getQ())
                else:
                    act = self.chooseRandom()
                # act = self.chooseRandom()
                cacheevict, poly = self.selectEvictPage(act)
                self.policyUsed[cacheevict] = poly
                # if self.time < 10000 + 1751 and self.time > 1751:
                #     if act == 1 :
                #         print('LFU')
                if not self.learning:
                    self.policyUsed[cacheevict] = -1

                self.Cache.delete(cacheevict)
                self.weightsUsed[cacheevict] = self.getQ()
                self.evictionTime[cacheevict] = self.time

                histevict = -1
                if act == 0:
                    if self.Hist1.size() == self.N:
                        histevict = self.Hist1.getIthPage(0)
                        self.Hist1.delete(histevict)
                    self.Hist1.add(cacheevict)
                    # print('Adding %d to hist1' % cacheevict)
                if act == 1:
                    if self.Hist2.size() == self.N:
                        histevict = self.Hist2.getIthPage(0)
                        self.Hist2.delete(histevict)
                    self.Hist2.add(cacheevict)
                    # print('Adding %d to hist2' % cacheevict)

                if histevict != -1:
                    del self.evictionTime[histevict]
                    del self.accessedTime[histevict]
                    del self.frequency[histevict]
                    del self.policyUsed[histevict]
                    del self.weightsUsed[histevict]
                # print('act = ', act)
                # self.Hist.add(evictPage)

            if page not in self.frequency:
                self.frequency[page] = 0
            self.Cache.add(page)
            page_fault = True

        for q in self.Cache:
            self.frequency[q] *= self.decayRate
        self.frequency[page] += 1
        self.accessedTime[page] = self.time

        return page_fault

    def get_list_labels(self):
        return ['L']
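## BANDIT_DOUBLE_HIST discounts the reward by how long ago the page was evicted:
## err = error_discount_rate ** (time - evictionTime), with error_discount_rate chosen as
## (0.005)**(1.0/N) so that the reward decays to 0.5% after N requests. A small standalone
## sketch of that schedule (illustrative helper, not part of the repo):
def discounted_reward(cache_size, elapsed):
    rate = 0.005 ** (1.0 / cache_size)
    return rate ** elapsed

## discounted_reward(100, 0) == 1.0, and discounted_reward(100, 100) == 0.005 (up to rounding)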
class ExpertLearning_v2(page_replacement_algorithm):
    def __init__(self, N):
        self.T = []
        self.N = N
        self.disk = Disk(N)
        self.freq = {}
        ## Training variables
        self.X, self.Y = [], []
        self.reward = []
        self.regret = []
        ## Config variables
        self.batchsize = N
        self.numbatch = 5
        self.discountrate = 0.9
        self.error = 0.5
        self.reduceErrorRate = 0.975
        ## Aux variables
        self.cachebuff = dequecustom()
        self.Xbuff = dequecustom()
        self.Ybuff = dequecustom()
        self.pageHitBuff = dequecustom()
        self.hist = dequecustom()
        self.batchsizeBuff = dequecustom()
        ## Accounting variables
        self.currentPageHits = 0
        self.current = 0
        self.uniquePages = Counter()
        ## Batch action variable
        self.action = [0]
        # self.discount = 0.9
        # self.sampleCount = 0
        # self.trainingSampleSize = 5 * N
        ## start tf
        tf.reset_default_graph()
        self.input = tf.placeholder(shape=[1, self.N], dtype=tf.float32)
        W1 = tf.Variable(tf.random_uniform([self.N, 8], 0, 0.01))
        out1 = tf.sigmoid(tf.matmul(self.input, W1))
        W2 = tf.Variable(tf.random_uniform([8, 2], 0, 0.01))
        self.out = tf.matmul(out1, W2)
        self.predictaction = tf.argmax(self.out, 1)  ## NOTE: was tf.argmax(self.out); axis 1 picks the action for the single input row
        self.nextQ = tf.placeholder(shape=[1, 2], dtype=tf.float32)
        loss = tf.reduce_sum(tf.square(self.out - self.nextQ))
        trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
        self.updatemodel = trainer.minimize(loss)
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def get_N(self):
        return self.N

    def __keyWithMinVal(self, d):
        v = list(d.values())
        k = list(d.keys())
        return k[v.index(min(v))]

    def __discountedReward(self, reward):
        discounted_reward = np.zeros(len(reward))
        rsum = 0
        for t in reversed(range(0, len(reward))):
            rsum = self.discount * rsum + reward[t]
            discounted_reward[t] = rsum
        return discounted_reward

    def __getRegret(self):
        cache = set(self.cachebuff.getleft())
        requestSequence = list(self.hist)
        ## Compute distance
        dist = {}
        for j, p in enumerate(requestSequence):
            if p not in dist:
                dist[p] = dequecustom()
            dist[p].append(j)
        discountedregret = 0
        i = 0
        batchid = 0
        optsum = 0
        hitsum = 0
        for hits, sz in zip(self.pageHitBuff, self.batchsizeBuff):
            opthits = 0
            batchid += 1
            for _ in range(0, sz):
                p = requestSequence[i]
                i += 1
                if p in cache:
                    opthits += 1
                else:
                    if len(cache) >= self.N:
                        rem = 'xxxxxxxxxxxxx'
                        for c in cache:
                            if c not in dist or len(dist[c]) == 0:
                                rem = c
                                break
                            if rem not in dist or dist[c].getleft() > dist[rem].getleft():
                                rem = c
                        ## Evict from cache
                        cache = cache - {rem}
                    ## Add page to cache
                    cache = cache | {p}
                ## Pop from dist
                dist[p].popleft()
            regret = opthits - hits
            discountedregret = discountedregret + regret * (0.9)**(batchid - 1)
            optsum += opthits
            hitsum += hits
            break
        return discountedregret

    def getState(self):
        x = np.zeros(self.N, np.float32)
        for i, page in enumerate(self.disk):
            x[i] = 1.0 * self.freq[page]
        if np.sum(x) > 0.00001:
            x = x / np.sum(x)
        return x

    ########################################################################################################################################
    ####REQUEST#############################################################################################################################
    ########################################################################################################################################
    def request(self, page):
        page_fault = False

        ############################
        ## Save data for training ##
        ############################
        if len(self.uniquePages) == 0:
            ## Compute regret for the first batch
            if len(self.Xbuff) >= self.numbatch:
                r = self.__getRegret()
                cache = self.cachebuff.popleft()
                s1 = np.array(self.Xbuff.popleft())
                s2 = np.array(self.Xbuff.getleft())
                act = self.Ybuff.popleft()
                hits = self.pageHitBuff.popleft()
                sz = self.batchsizeBuff.popleft()
                for _ in range(0, sz):
                    temp = self.hist.popleft()

                #############################################################################################################################
                ## Train here ###############################################################################################################
                #############################################################################################################################
                allq = self.sess.run(self.out, feed_dict={self.input: s1})
                nextq = self.sess.run(self.out, feed_dict={self.input: s2})
                Qmax = np.max(nextq)
                targetQ = allq
                targetQ[0, act[0]] = r + self.discountrate * Qmax
                _ = self.sess.run(self.updatemodel,
                                  feed_dict={
                                      self.input: s1,
                                      self.nextQ: targetQ
                                  })
                # self.error = self.error * self.reduceErrorRate

            #####################
            ## Choose randomly ##
            #####################
            state = np.array([self.getState()])
            # print(state)
            self.action = self.sess.run(self.predictaction, feed_dict={self.input: state})
            if np.random.rand() < self.error:
                self.action[0] = 0 if np.random.rand() < 0.5 else 1

            self.cachebuff.append(self.disk.getData())
            self.Xbuff.append(state)
            self.Ybuff.append(self.action)

        ##########################
        ## Process page request ##
        ##########################
        if self.disk.inDisk(page):
            self.disk.moveBack(page)
            self.freq[page] += 1
            self.currentPageHits += 1
        else:
            if self.disk.size() == self.N:
                if self.action[0] == 0:
                    ## Remove LRU page
                    lru = self.disk.getIthPage(0)
                    self.disk.delete(lru)
                    del self.freq[lru]
                elif self.action[0] == 1:
                    ## Remove LFU page
                    lfu = self.__keyWithMinVal(self.freq)
                    self.disk.delete(lfu)
                    del self.freq[lfu]
            # Add page to the MRU position
            self.disk.add(page)
            self.freq[page] = 1
            page_fault = True

        # self.uniquePages = self.uniquePages | {page}
        self.uniquePages.update({page: 1})

        ## Store page hits for current batch
        if len(self.uniquePages) == self.N:
            self.pageHitBuff.append(self.currentPageHits)
            self.batchsizeBuff.append(sum(self.uniquePages.values()))
            ## Reset variables
            self.uniquePages.clear()
            self.currentPageHits = 0

        self.hist.append(page)
        return page_fault

    def get_data(self):
        # data = []
        # for i,p,m in enumerate(self.T):
        #     data.append((p,m,i,0))
        # return data
        return [self.disk.get_data()]

    def get_list_labels(self):
        return ['L']
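import numpy as np

## The training step above is a one-step Q-learning update on batch-level regret: the
## target for the chosen action is r + discountrate * max_a Q(s', a), and the network is
## fit toward that target. Standalone numpy sketch of the target construction
## (illustrative values, not the repo's code).
def q_target(q_values_s1, q_values_s2, action, reward, discountrate=0.9):
    target = q_values_s1.copy()
    target[0, action] = reward + discountrate * np.max(q_values_s2)
    return target

q1 = np.array([[0.2, 0.4]])      # Q(s, .) from the network
q2 = np.array([[0.1, 0.6]])      # Q(s', .) from the network
print(q_target(q1, q2, action=1, reward=0.0))   # -> [[0.2, 0.54]]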
class ARC(page_replacement_algorithm):
    def __init__(self, N):
        self.N = N
        self.T1 = Disk(N)
        self.T2 = Disk(N)
        self.B1 = Disk(N)
        self.B2 = Disk(2 * N)
        self.P = 0
        self.time = 0
        self.X = []
        self.Y = []
        self.unique = {}
        self.unique_cnt = 0
        self.pollution_dat_x = []
        self.pollution_dat_y = []

    def getWeights(self):
        # return np.array([self.X, self.Y1, self.Y2, self.pollution_dat_x, self.pollution_dat_y]).T
        return np.array([self.pollution_dat_x, self.pollution_dat_y]).T

    def getStats(self):
        d = {}
        d['pollution'] = np.array([self.pollution_dat_x, self.pollution_dat_y]).T
        return d

    def visualize(self, plt):
        # l1, = plt.plot(self.X,self.Y,'r-', label='ARC p-value')
        # return [l1]
        return []

    def get_N(self):
        return self.N

    def request(self, page):
        page_fault = False
        self.time += 1
        # self.X.append(self.time)
        # self.Y.append(1.0*self.P / self.N)

        t1 = self.T1.size()
        t2 = self.T2.size()
        b1 = self.B1.size()
        b2 = self.B2.size()
        assert t1 + t2 <= self.N, 'Error: t1+t2 should not be bigger than self.N. t1+t2=%d+%d=%d' % (t1, t2, t1 + t2)
        assert t1 + b1 <= self.N, 'Error: t1+b1 should not be bigger than self.N. t1+b1=%d+%d=%d' % (t1, b1, t1 + b1)
        assert t1 + t2 + b1 + b2 <= 2 * self.N, 'Error: t1+t2+b1+b2 should not be bigger than 2*self.N. t1+t2+b1+b2=%d+%d+%d+%d=%d' % (t1, t2, b1, b2, t1 + t2 + b1 + b2)

        if page in self.T1 or page in self.T2:
            if page in self.T1:
                assert self.T1.delete(page)
            if page in self.T2:
                assert self.T2.delete(page)
            assert self.T2.add(page), 'failed adding to T2 at Case 1'
        elif self.B1.inDisk(page):
            if self.B2.size() > self.B1.size():
                r = self.B2.size() / self.B1.size()
            else:
                r = 1
            self.P = min(self.P + r, self.N)
            self.__replace(page)
            assert self.B1.delete(page)
            assert self.T2.add(page), 'failed adding to T2 at case B1'
            page_fault = True
        elif self.B2.inDisk(page):
            if self.B1.size() > self.B2.size():
                r = self.B1.size() / self.B2.size()
            else:
                r = 1
            self.P = max(self.P - r, 0)
            self.__replace(page)
            assert self.B2.delete(page)
            assert self.T2.add(page), 'failed adding to T2 at case B2'
            page_fault = True
        else:
            if t1 + b1 == self.N:
                if t1 < self.N:
                    assert self.B1.deleteFront() is not None, 'Error deleting front of B1'
                    self.__replace(page)
                else:
                    assert self.T1.deleteFront() is not None, 'Error deleting front of T1'
            elif t1 + b1 < self.N:
                if t1 + t2 + b1 + b2 >= self.N:
                    if t1 + t2 + b1 + b2 == 2 * self.N:
                        assert self.B2.deleteFront() is not None, 'Error deleting front of B2'
                    self.__replace(page)
            # Add page to the MRU position in T1
            assert self.T1.add(page), 'failed adding page to T1 at case 4'
            page_fault = True

        if page_fault:
            self.unique_cnt += 1
        self.unique[page] = self.unique_cnt

        if self.time % self.N == 0:
            pollution = 0
            for pg in self.T1.getData() + self.T2.getData():
                if self.unique_cnt - self.unique[pg] >= 2 * self.N:
                    pollution += 1
            self.pollution_dat_x.append(self.time)
            self.pollution_dat_y.append(100 * pollution / self.N)

        return page_fault

    def __replace(self, x):
        if self.T1.size() > 0 and (self.T1.size() > self.P or
                                   (self.B1.inDisk(x) and self.T1.size() == int(self.P))):
            y = self.T1.deleteFront()
            assert y is not None, 'Error deleting front of T1 in replace (Case 1)'
            assert self.B1.add(y), 'failed adding page to B1 at replace 1(Case 1)'
        else:
            y = self.T2.deleteFront()
            assert y is not None, 'Error deleting front of T2 in replace (Case 2)'
            assert self.B2.add(y), 'failed adding page to B2 at replace 1(Case 2)'
        # s1 = self.T1.size()+self.T2.size()
        # s2 = self.B1.size()+self.B2.size()
        # print('sizes = %d + %d + %d + %d = %d + %d = %d' % (self.T1.size(),self.T2.size(),self.B1.size(),self.B2.size(), s1,s2,s1+s2))
        # print('failed adding at replace 2 %d ' %y)

    def get_data(self):
        return [
            self.T1.get_data(), self.T2.get_data(),
            self.B1.get_data(), self.B2.get_data()
        ]

    def get_list_labels(self):
        return ['T1', 'T2', 'B1', 'B2']
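import numpy as np

## A small driver sketch for exercising the ARC class above on a synthetic trace and
## reading back the pollution statistic it records every N requests. The import path and
## the Zipf-like trace are assumptions, not part of the repo; uncomment and adjust to use.
# from ARC import ARC

def run_trace(algo, trace):
    faults = sum(1 for page in trace if algo.request(page))
    return faults / float(len(trace))

# np.random.seed(0)
# trace = np.random.zipf(1.2, size=10000) % 1000
# arc = ARC(100)
# print('fault rate:', run_trace(arc, trace))
# print('pollution samples:', arc.getStats()['pollution'][:5])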
class ARC(page_replacement_algorithm):
    def __init__(self, N):
        self.T = []
        self.N = N
        self.T1 = Disk(N)
        self.T2 = Disk(N)
        self.B1 = Disk(N)
        self.B2 = Disk(2 * N)
        self.P = 0

    def get_N(self):
        return self.N

    def request(self, page):
        page_fault = False
        # if inList(self.T, page):
        if self.T1.inDisk(page) or self.T2.inDisk(page):
            # self.T = moveToMRU(self.T,page)
            if page in self.T1:
                self.T1.delete(page)
            if page in self.T2:
                self.T2.delete(page)
            if not self.T2.add(page):
                print('failed adding at Case 1')
        elif self.B1.inDisk(page):
            if self.B2.size() > self.B1.size():
                r = self.B2.size() / self.B1.size()
            else:
                r = 1
            self.P = min(self.P + r, self.N)
            self.__replace(page)
            self.B1.delete(page)
            if not self.T2.add(page):
                print('failed adding at B1')
            page_fault = True
        elif self.B2.inDisk(page):
            if self.B1.size() > self.B2.size():
                r = self.B1.size() / self.B2.size()
            else:
                r = 1
            self.P = max(self.P - r, 0)  ## NOTE: was min(self.P - r, 0); ARC clamps p at zero from below
            self.__replace(page)
            self.B2.delete(page)
            if not self.T2.add(page):
                print('failed adding at B2')
            page_fault = True
        else:
            t1 = self.T1.size()
            t2 = self.T2.size()
            b1 = self.B1.size()
            b2 = self.B2.size()
            if t1 + b1 == self.N:
                if t1 < self.N:
                    self.B1.deleteFront()
                    self.__replace(page)
                else:
                    self.T1.deleteFront()
            elif t1 + b1 < self.N:
                if t1 + t2 + b1 + b2 >= self.N:
                    if t1 + t2 + b1 + b2 == 2 * self.N:
                        self.B2.deleteFront()
                    self.__replace(page)
            # Add page to the MRU position in T1
            # self.T.append(page)
            if not self.T1.add(page):
                print('failed adding at case 4')
            page_fault = True
        return page_fault

    def __replace(self, x):
        if self.T1.size() > 0 and (self.T1.size() > self.P or
                                   (self.B1.inDisk(x) and self.B1.size() == self.P)):
            y = self.T1.deleteFront()
            if y is not None:
                if not self.B1.add(y):
                    print('failed adding at replace 1')
        else:
            y = self.T2.deleteFront()
            if y is not None:
                if not self.B2.add(y):
                    print('sizes = %d %d %d %d' % (self.T1.size(), self.T2.size(), self.B1.size(), self.B2.size()))
                    print('failed adding at replace 2 %d ' % y)

    def get_data(self):
        return [
            self.T1.get_data(), self.T2.get_data(),
            self.B1.get_data(), self.B2.get_data()
        ]

    def get_list_labels(self):
        return ['T1', 'T2', 'B1', 'B2']
class FAR(page_replacement_algorithm):
    def __init__(self, N):
        self.T = Disk(N)
        self.N = N
        self.marked = set()
        self.G = {}  ## local access graph
        self.last_request = -1
        self.first_request = True  ## NOTE: was False; the guard in request() is meant to skip the very first request

    def get_N(self):
        return self.N

    def request(self, page):
        # print('request: ', page)
        page_fault = False

        if not self.first_request:
            self.__add_edge(self.last_request, page)
        self.last_request = page
        self.first_request = False

        if page in self.T:
            ## Mark page
            self.marked.add(page)
        else:
            # Start a new phase when all pages are marked and a page fault occurs
            # Unmark all the pages
            if len(self.marked) == self.N:
                self.marked.clear()

            if self.T.size() == self.N:
                ## Get the set of unmarked pages
                U = set(self.T.get_data()) - self.marked

                # Compute the page distance
                # self.PR = self.compute_pagerank(page)
                dist = self.__distance_bfs(page)
                furthest_page = -1
                first = True
                for u in U:
                    # print("u = ",u)
                    if first or dist[u] > dist[furthest_page]:
                        furthest_page = u
                        first = False

                ## Delete page from cache
                self.T.delete(furthest_page)

                ## Remove the furthest page from the access graph
                if furthest_page in self.G:
                    # print('G.pop (',u,')')
                    self.G.pop(furthest_page, None)

            ## Mark page and add to T
            self.marked.add(page)
            self.T.add(page)

            ## Page fault is True
            page_fault = True

        return page_fault

    def __distance_bfs(self, u):
        q = Q.Queue()
        dist = {}
        q.put(u)
        dist[u] = 0
        while not q.empty():
            u = q.get()
            # print("u = ", u)
            adj = self.G[u]
            for v in adj:
                if v not in dist and v in self.G:
                    if v in self.G:
                        dist[v] = dist[u] + 1
                        q.put(v)
                    else:
                        self.G[u] = self.G[u] - {v}
                # print("\tv = ", v)
        return dist

    def __add_edge(self, u, v):
        if u not in self.G:
            self.G[u] = set()
        if v not in self.G:
            self.G[v] = set()
        self.G[u] = self.G[u] | {v}
        self.G[v] = self.G[v] | {u}

    def page_label(self, page):
        lab = "%s" % (page)
        return lab

    def page_color(self, page):
        if page in self.marked:
            return 1  ## Red
        else:
            return 0  ## White

    def get_data(self):
        # data = []
        # for i,p,m in enumerate(self.T):
        #     data.append((p,m,i,0))
        # return data
        return [self.T.get_data()]
class WALK_MARKING_SLOW(page_replacement_algorithm):
    def __init__(self, N):
        self.T = Disk(N)
        self.H = Disk(N)
        self.N = N
        self.marked = set()
        self.G = {}  ## local access graph
        self.is_first_request = True
        self.last_request = -1
        self.page_probability = {}

    def get_N(self):
        return self.N

    def request(self, page):
        # print('request: ', page)
        page_fault = False

        if not self.is_first_request:
            self.__add_edge(self.last_request, page)
        self.last_request = page
        self.is_first_request = False

        if page in self.T:
            ## Mark page
            self.marked.add(page)
        else:
            if page in self.H:
                self.H.delete(page)

            # Start a new phase when all pages are marked and a page fault occurs
            # Unmark all the pages
            if len(self.marked) == self.N:
                self.marked.clear()

            if self.T.size() == self.N:
                self.page_probability = self.__calculate_prob(page)

                ## Get the set of unmarked pages
                U = set(self.T.get_data()) - self.marked
                U_list = list(U)
                U_dist = []
                for u in U_list:
                    U_dist.append(self.page_probability[u])
                page_to_evict = random_select_page(U_list, U_dist)

                ## Delete page from cache
                self.T.delete(page_to_evict)

                ## Remove the least recent page from history
                if self.H.size() == self.N:
                    hist_lru = self.H.deleteFront()
                    if hist_lru is not None and hist_lru in self.G:
                        self.G.pop(hist_lru, None)

                ## Move discarded page to history
                self.H.add(page_to_evict)

            ## Mark page and add to T
            self.marked.add(page)
            self.T.add(page)

            ## Page fault is True
            page_fault = True

        return page_fault

    def __add_edge(self, u, v):
        if u not in self.G:
            self.G[u] = set()
        if v not in self.G:
            self.G[v] = set()
        self.G[u] = self.G[u] | {v}
        self.G[v] = self.G[v] | {u}

    def get_adj_matrix(self):
        ## Mapping
        node_id = {}
        node_name = {}
        for i, node in enumerate(self.G):
            node_id[node] = i
            node_name[i] = node
        A = np.zeros((len(node_id), len(node_id)))
        for u in self.G:
            adj = list(self.G[u])
            for v in adj:
                if v in self.G:
                    u_id = node_id[u]
                    v_id = node_id[v]
                    A[u_id, v_id] = 1
                    A[v_id, u_id] = 1
                else:
                    self.G[u] = self.G[u] - {v}
        ## Normalize rows so each row is a probability distribution
        for u in range(len(A)):
            degree = np.sum(A[u, :])
            if degree > 0:
                A[u, :] /= degree
        return A, node_id, node_name

    def __calculate_prob(self, init_page):
        A, node_id, node_name = self.get_adj_matrix()
        u = node_id[init_page]
        n = len(A)
        M = Markov(A)
        R = M.random_walk_distribution(u)
        # print('R = ',R)
        P = {}
        for u, p in enumerate(R):
            # print('PR[%s] = %f' % (node_name[u], pr))
            P[node_name[u]] = p
        return P

    ######################################################################################################################################
    def page_label(self, page):
        lab = "%s(%.1f)" % (page, self.page_probability[page] if page in self.page_probability else 0)
        return lab

    def page_color(self, page):
        if page in self.marked:
            return 1  ## Red
        else:
            return 0  ## White

    def debug(self):
        X = []
        for u in self.get_data():
            X.append((self.P[u], u))

    def get_data(self):
        # data = []
        # for i,p,m in enumerate(self.T):
        #     data.append((p,m,i,0))
        # return data
        return [self.T.get_data()]
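import random

## WALK_MARKING_SLOW calls random_select_page(U_list, U_dist), which is defined elsewhere in
## the repo. A plausible standalone sketch of such a helper: normalize the weights over the
## candidate set and draw one page. This is an assumption about its behavior, not the repo's
## implementation.
def random_select_page(pages, weights):
    total = float(sum(weights))
    if total <= 0:
        return random.choice(pages)          # fall back to uniform if all weights are zero
    r, cumulative = random.random() * total, 0.0
    for page, w in zip(pages, weights):
        cumulative += w
        if r < cumulative:
            return page
    return pages[-1]                         # guard against floating-point round-off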