def process_event(self, time, receiver, content, log, weight):
    """Serve a request from the nearest replica of *content*.

    The request is routed to the replica reachable over the fewest
    hops; the content is returned along the reverse path and cached
    according to the configured meta-caching policy: 'LCE' inserts at
    every cache on the path, 'LCD' only at the first cache below the
    serving node.
    """
    # get all required data
    locations = self.view.content_locations(content)
    # Nearest replica = fewest hops.  FIX: the original used
    # sum(shortest_path(...)), which adds up node identifiers rather
    # than measuring distance; len(path) is the hop-count metric.
    nearest_replica = min(
        locations,
        key=lambda s: len(self.view.shortest_path(receiver, s)))
    # Route request to nearest replica
    self.controller.start_session(time, receiver, content, log, weight)
    self.controller.forward_request_path(receiver, nearest_replica)
    self.controller.get_content(nearest_replica)
    # Now we need to return packet and we have options
    path = list(
        reversed(self.view.shortest_path(receiver, nearest_replica)))
    if self.metacaching == 'LCE':
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if self.view.has_cache(v) and not self.view.cache_lookup(
                    v, content):
                self.controller.put_content(v)
    elif self.metacaching == 'LCD':
        copied = False
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if not copied and v != receiver and self.view.has_cache(v):
                self.controller.put_content(v)
                copied = True
    else:
        raise ValueError('Metacaching policy %s not supported'
                         % self.metacaching)
    self.controller.end_session()
def process_event(self, time, receiver, content, log, weight):
    """Serve a request and cache the returned content at the on-path
    cache with the highest betweenness centrality; ties are broken in
    favour of the cache closest to the receiver.
    """
    origin = self.view.content_source(content)
    up_path = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log, weight)
    # Walk towards the origin, stopping at the first cache hit.
    hit_node = None
    for prev_node, node in path_links(up_path):
        self.controller.forward_request_hop(prev_node, node)
        if self.view.has_cache(node) and self.controller.get_content(node):
            hit_node = node
            break
    if hit_node is None:
        # Every on-path cache missed: fetch from the last node reached
        # (the origin).
        self.controller.get_content(node)
        hit_node = node
    # Deliver the content back along the reverse path.
    down_path = list(reversed(self.view.shortest_path(receiver, hit_node)))
    # Designate the cache with maximal betweenness centrality; ">="
    # makes later (receiver-side) nodes win ties.
    best_betw = -1
    designated = None
    for node in down_path[1:]:
        if self.view.has_cache(node) and self.betw[node] >= best_betw:
            best_betw = self.betw[node]
            designated = node
    for prev_node, node in path_links(down_path):
        self.controller.forward_content_hop(prev_node, node)
        if node == designated:
            self.controller.put_content(node)
    self.controller.end_session()
def process_event(self, time, receiver, content, log, weight):
    """Serve a request and cache the returned content at one cache
    chosen uniformly at random among the intermediate caches on the
    delivery path (endpoints excluded).
    """
    origin = self.view.content_source(content)
    up_path = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log, weight)
    hit_node = None
    for hop_u, hop_v in path_links(up_path):
        self.controller.forward_request_hop(hop_u, hop_v)
        if self.view.has_cache(hop_v) and self.controller.get_content(hop_v):
            hit_node = hop_v
            break
    if hit_node is None:
        # All on-path caches missed: fetch from the origin, the last
        # node the walk reached.
        self.controller.get_content(hop_v)
        hit_node = hop_v
    down_path = list(reversed(self.view.shortest_path(receiver, hit_node)))
    # Pick one intermediate cache at random (or none, if there isn't one).
    candidates = [n for n in down_path[1:-1] if self.view.has_cache(n)]
    target = random.choice(candidates) if candidates else None
    for hop_u, hop_v in path_links(down_path):
        self.controller.forward_content_hop(hop_u, hop_v)
        if hop_v == target:
            self.controller.put_content(hop_v)
    self.controller.end_session()
def process_event(self, time, receiver, content, n_segments, time_interval, log):
    """Serve a request, first checking the receiver's own cache.

    On a local miss the request travels toward the source, stopping at
    the first on-path cache that holds the content; the content is
    then inserted at every cache on the return path (LCE).

    ``n_segments`` and ``time_interval`` are accepted for interface
    compatibility with sibling strategies but are not used here.
    """
    # Start session.
    self.controller.start_session(time, receiver, content, log)
    # Check if the receiver has already cached the content, if true,
    # end the session.
    if self.view.has_cache(receiver):
        if self.controller.get_content(receiver):
            self.controller.end_session()
            return None
    # Receiver does not cache the content, get all required data.
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route request to original source and queries caches on the path.
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source.  FIX: this block is
        # now a for-else; previously it also ran after a cache hit,
        # fetching (and counting) the content a second time at the
        # serving node.
        self.controller.get_content(v)
        serving_node = v
    # Route content to receiver.
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if self.view.has_cache(v):
            # Insert content.
            self.controller.put_content(v)
    # End session.
    self.controller.end_session()
def process_event(self, time, receiver, content, n_segments, time_interval, log):
    """Handle one segment request with receiver-side caching.

    ``content % n_segments == 0`` appears to mark the final segment,
    after which the user is taken offline — TODO confirm against the
    workload generator.  ``time_interval`` is accepted but unused.
    """
    # Start session.
    self.controller.start_session(time, receiver, content, log)
    # Check if the receiver has already cached the content.
    if self.view.has_cache(receiver):
        if self.controller.get_content(receiver):
            if content % n_segments == 0:
                # Last segment served locally: simulate the user leaving.
                self.controller.user_go_offline(receiver, content, n_segments)
            self.controller.end_session()
            return None
    # Receiver does not cache the content, get all required data.
    content_locations = list(self.view.content_locations(content))
    # print ("Content locations: " + str(content_locations) + " for " + str(segment))
    destination = content_locations[0]
    path = self.view.shortest_path(receiver, destination)
    # Find the nearest content location and the corresponding shortest
    # path; the strict "<" keeps the first of several equally near ones.
    for content_location in content_locations:
        current_path = self.view.shortest_path(receiver, content_location)
        if len(current_path) < len(path):
            path = current_path
            destination = content_location
    # Route request to destination.
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
    # Get content from destination.
    self.controller.get_content(destination)
    # Route content to receiver.
    path = list(reversed(path))
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
    # NOTE(review): assumed to sit after the delivery loop rather than
    # inside it — confirm against the original formatting.
    if content % n_segments == 0:
        self.controller.user_go_offline(receiver, content, n_segments)
    else:
        self.controller.put_content(receiver)
    self.controller.end_session()
def process_event(self, time, receiver, content, log, weight):
    """Leave Copy Down (LCD): on the way back, copy the content only
    into the first cache below the node that served it (never the
    receiver itself).
    """
    origin = self.view.content_source(content)
    up = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log, weight)
    hit = None
    for a, b in path_links(up):
        self.controller.forward_request_hop(a, b)
        if self.view.has_cache(b) and self.controller.get_content(b):
            hit = b
            break
    if hit is None:
        # No on-path cache held the content; fetch from the origin.
        self.controller.get_content(b)
        hit = b
    down = list(reversed(self.view.shortest_path(receiver, hit)))
    # The single insertion target: first cache below the serving node.
    target = next((n for n in down[1:]
                   if n != receiver and self.view.has_cache(n)), None)
    for a, b in path_links(down):
        self.controller.forward_content_hop(a, b)
        if b == target:
            self.controller.put_content(b)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request; on the return path every cache insertion is
    tagged with the hop distance from the serving node and the node's
    betweenness centrality.
    """
    # get all required data
    hop = 0
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source.  FIX: made this a
        # for-else; previously it also executed after a cache hit,
        # double-counting the hit at the serving node.
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    for u, v in path_links(path):
        hop += 1
        self.controller.forward_content_hop(u, v)
        if self.view.has_cache(v):
            # insert content, tagged with hop count and centrality
            self.controller.put_content(v, hop=hop, betw=self.betw[v])
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Betweenness-centrality caching: the returned content is stored
    only at the on-path cache with the largest betweenness value,
    preferring the candidate nearest the receiver on ties.
    """
    src = self.view.content_source(content)
    request_route = self.view.shortest_path(receiver, src)
    self.controller.start_session(time, receiver, content, log)
    server = None
    for upstream, downstream in path_links(request_route):
        self.controller.forward_request_hop(upstream, downstream)
        if self.view.has_cache(downstream) and \
                self.controller.get_content(downstream):
            server = downstream
            break
    if server is None:
        # Loop ran to completion: the walk reached the source.
        self.controller.get_content(downstream)
        server = downstream
    delivery_route = list(reversed(self.view.shortest_path(receiver, server)))
    # Select the highest-betweenness cache; ">=" lets nodes closer to
    # the receiver win ties.
    top_score = -1
    chosen = None
    for node in delivery_route[1:]:
        if self.view.has_cache(node) and self.betw[node] >= top_score:
            top_score = self.betw[node]
            chosen = node
    for upstream, downstream in path_links(delivery_route):
        self.controller.forward_content_hop(upstream, downstream)
        if downstream == chosen:
            self.controller.put_content(downstream)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request from the nearest replica of *content*, then
    cache along the return path per the meta-caching policy ('LCE'
    everywhere, 'LCD' one level below the serving node).
    """
    # get all required data
    locations = self.view.content_locations(content)
    # FIX: distance must be the hop count, len(path); the original
    # summed the node identifiers on the path, which is meaningless
    # as a distance metric.
    nearest_replica = min(
        locations,
        key=lambda s: len(self.view.shortest_path(receiver, s)))
    # Route request to nearest replica
    self.controller.start_session(time, receiver, content, log)
    self.controller.forward_request_path(receiver, nearest_replica)
    self.controller.get_content(nearest_replica)
    # Now we need to return packet and we have options
    path = list(reversed(self.view.shortest_path(receiver, nearest_replica)))
    if self.metacaching == 'LCE':
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if self.view.has_cache(v) and not self.view.cache_lookup(v, content):
                self.controller.put_content(v)
    elif self.metacaching == 'LCD':
        copied = False
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if not copied and v != receiver and self.view.has_cache(v):
                self.controller.put_content(v)
                copied = True
    else:
        raise ValueError('Metacaching policy %s not supported'
                         % self.metacaching)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Random-choice caching: the returned content is inserted at one
    uniformly chosen intermediate cache on the delivery path.
    """
    src = self.view.content_source(content)
    route = self.view.shortest_path(receiver, src)
    self.controller.start_session(time, receiver, content, log)
    server = None
    for left, right in path_links(route):
        self.controller.forward_request_hop(left, right)
        if self.view.has_cache(right) and self.controller.get_content(right):
            server = right
            break
    if server is None:
        # No hit on the path: the walk ended at the source.
        self.controller.get_content(right)
        server = right
    back = list(reversed(self.view.shortest_path(receiver, server)))
    middle_caches = [n for n in back[1:-1] if self.view.has_cache(n)]
    chosen = random.choice(middle_caches) if middle_caches else None
    for left, right in path_links(back):
        self.controller.forward_content_hop(left, right)
        if right == chosen:
            self.controller.put_content(right)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Leave Copy Down: a single copy of the content is left in the
    first cache one level below the node that served it."""
    origin = self.view.content_source(content)
    route = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log)
    serving = None
    for up_node, down_node in path_links(route):
        self.controller.forward_request_hop(up_node, down_node)
        if self.view.has_cache(down_node) and \
                self.controller.get_content(down_node):
            serving = down_node
            break
    if serving is None:
        # Loop completed without a hit: fetch from the source node.
        self.controller.get_content(down_node)
        serving = down_node
    back = list(reversed(self.view.shortest_path(receiver, serving)))
    placed = False
    for up_node, down_node in path_links(back):
        self.controller.forward_content_hop(up_node, down_node)
        if not placed and down_node != receiver and \
                self.view.has_cache(down_node):
            self.controller.put_content(down_node)
            placed = True
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Leave Copy Everywhere: fetch from the first on-path cache that
    holds the content (or the source) and insert a copy at every cache
    on the way back to the receiver.
    """
    origin = self.view.content_source(content)
    request_path = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log)
    hit_node = None
    for prev_hop, next_hop in path_links(request_path):
        self.controller.forward_request_hop(prev_hop, next_hop)
        if self.view.has_cache(next_hop) and \
                self.controller.get_content(next_hop):
            hit_node = next_hop
            break
    if hit_node is None:
        # All caches missed; the walk ended at the origin.
        self.controller.get_content(next_hop)
        hit_node = next_hop
    delivery_path = list(reversed(self.view.shortest_path(receiver, hit_node)))
    for prev_hop, next_hop in path_links(delivery_path):
        self.controller.forward_content_hop(prev_hop, next_hop)
        if self.view.has_cache(next_hop):
            self.controller.put_content(next_hop)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Always fetch from the source and cache only at the receiver,
    evicting a random entry first when the receiver's cache is full.
    """
    self.controller.start_session(time, receiver, content, log)
    # A local hit terminates the session immediately.
    if self.view.has_cache(receiver) and self.controller.get_content(receiver):
        self.controller.end_session()
        return None
    origin = self.view.content_source(content)
    route = self.view.shortest_path(receiver, origin)
    for src, dst in path_links(route):
        self.controller.forward_request_hop(src, dst)
    self.controller.get_content(origin)
    for src, dst in path_links(list(reversed(route))):
        self.controller.forward_content_hop(src, dst)
    # Make room if needed, then store the content at the receiver.
    if self.view.cache_is_full(receiver):
        self.controller.remove_content_by_random(receiver)
    self.controller.put_content(receiver)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request preferring receivers that already downloaded
    the content over the origin source.

    A random "downloaded" receiver (if any) is chosen as the target;
    on the way back one random intermediate cache plus the requesting
    receiver store the content.  Also simulates users going offline.
    Side effect: increments ``self.counter_cache_same_receiver`` on a
    local hit.
    """
    # get all required data
    source = self.view.content_source(content)
    # Receivers known to have downloaded this content already.
    downloaded_content_locations = self.view.content_locations_of_receivers(
        content)
    self.controller.start_session(time, receiver, content, log)
    # simulation of receivers disappearing
    offline_node = self.controller.disappearing_users_simulation(
        downloaded_content_locations)
    if (offline_node):
        downloaded_content_locations.remove(offline_node)
    best_serving_node = ''
    # Local hit: serve from the receiver's own cache and stop.
    if self.view.has_cache(
            receiver):  # asking node may also be the delivering node
        if self.controller.get_content(receiver):
            serving_node = receiver
            self.controller.end_session()
            self.counter_cache_same_receiver = self.counter_cache_same_receiver + 1
            return
    # Prefer a random receiver that already holds the content;
    # otherwise fall back to the origin source.
    if len(downloaded_content_locations) > 0:
        chosen_receiver = random.choice(downloaded_content_locations)
        best_serving_node = chosen_receiver
        print('cache retrieved from download')
    else:
        best_serving_node = source
    path = self.view.shortest_path(receiver, best_serving_node)
    # Route requests to either to downloaded content node or source
    # and queries caches on the path
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from the chosen target node.
        self.controller.get_content(v)
        serving_node = v
    # Return content; cache at one random intermediate cache.
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    caches = [v for v in path[1:-1] if self.view.has_cache(v)]
    designated_cache = random.choice(caches) if len(caches) > 0 else None
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if v == designated_cache:
            self.controller.put_content(v)
    self.controller.put_content(receiver)  # put content on the receiver
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Route a request over a 'green' path when the receiver belongs
    to the green (or associated-green) node set, otherwise over the
    plain shortest path; the returned content is cached at every
    on-path cache (LCE).
    """
    # get all required data
    source = self.view.content_source(content)
    (green_nodes, associated_green_nodes, green_path_dict,
     associated_green_path_dict) = get_green_info()
    (green_path, associate_green_path) = set_green_path(
        green_path_dict, associated_green_path_dict,
        self.view.model.shortest_path)
    # Pick the routing path according to the receiver's node class.
    # (prints kept for parity with the original debug output, but
    # converted to call syntax, valid on both Python 2 and 3)
    if receiver in green_nodes:
        print('green')
        path = get_green_path(green_path, receiver, source)
    elif receiver in associated_green_nodes:
        print('associated')
        path = get_green_path(associate_green_path, receiver, source)
    else:
        print('shortest')
        path = self.view.shortest_path(receiver, source)
    print(path)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source.  FIX: made this a
        # for-else; previously it also ran after a cache hit,
        # double-counting the hit at the serving node.
        self.controller.get_content(v)
        serving_node = v
    # Return content over the shortest path; cache everywhere
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    print(path)
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if self.view.has_cache(v):
            # insert content
            self.controller.put_content(v)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Request-frequency (Rqf) driven caching.

    On the way up, the max/min Rqf seen among on-path caches are
    tracked together with how many caches attain them.  On the way
    back the content is inserted at the last cache attaining the max
    Rqf and, if nothing was inserted yet, at the last cache attaining
    the min Rqf.  Exact Rqf semantics come from
    ``view.cache_getRqf`` — not visible here, assumed per-node request
    frequency for *content*.
    """
    # pdb.set_trace()
    # Per-request bookkeeping (reset on every event).
    self.maxRqf = 0
    self.minRqf = 99999
    self.acounter = 0   # number of caches attaining maxRqf
    self.icounter = 0   # number of caches attaining minRqf
    self.cur_cached = False
    self.cached_tag1 = False
    self.cached_tag2 = False
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
            if v != source:
                rqf = self.view.cache_getRqf(v, content)
                if self.maxRqf < rqf:
                    self.maxRqf = rqf
                    self.acounter = 1
                elif self.maxRqf == rqf:
                    self.acounter += 1
                if self.minRqf > rqf:
                    self.minRqf = rqf
                    self.icounter = 1
                elif self.minRqf == rqf:
                    self.icounter += 1
    else:
        # No cache hit anywhere on the path: fetch from the source.
        self.controller.get_content(source)
        serving_node = source
    # return content back
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if self.view.has_cache(v):
            vva = self.view.cache_getRqf(v, content)
            if self.maxRqf <= vva:
                self.acounter -= 1
                if not self.cached_tag1 and self.acounter == 0:
                    self.controller.put_content(v)
                    # print("path=%s, 1put in %d"%(path, v))
                    self.cached_tag1 = True
                    self.cur_cached = True
            if self.minRqf >= vva:
                self.icounter -= 1
                if not self.cur_cached:
                    if not self.cached_tag2 and self.icounter == 0:
                        self.controller.put_content(v)
                        # print("path=%s, 2put in %d")
                        # FIX: was "self._cached_tag2 = True", which
                        # created a new attribute and never set the
                        # cached_tag2 flag initialized above, so the
                        # min-Rqf insertion could fire more than once.
                        self.cached_tag2 = True
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request from the nearest replica (per the precomputed
    ``self.distance`` matrix); only the 'ideal' implementation is
    functional — both 'approx' variants raise NotImplementedError.
    """
    # get all required data
    locations = self.view.content_locations(content)
    # print (receiver, content, locations)
    nearest_replica = min(locations,
                          key=lambda x: self.distance[receiver][x])
    # Route request to nearest replica
    self.controller.start_session(time, receiver, content, log)
    if self.implementation == 'ideal':
        self.controller.forward_request_path(receiver, nearest_replica)
    elif self.implementation == 'approx_1':
        # Floods actual request packets
        paths = {
            loc: len(self.view.shortest_path(receiver, loc)[:self.radius])
            for loc in locations
        }
        # TODO: Continue
        raise NotImplementedError("Not implemented")
    elif self.implementation == 'approx_2':
        # Floods meta-request packets
        # TODO: Continue
        raise NotImplementedError("Not implemented")
    else:
        # Should never reach this block anyway
        raise ValueError("Implementation %s not supported"
                         % str(self.implementation))
    self.controller.get_content(nearest_replica)
    # Now we need to return packet and we have options
    path = list(
        reversed(self.view.shortest_path(receiver, nearest_replica)))
    if self.metacaching == 'LCE':
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if self.view.has_cache(v) and not self.view.cache_lookup(
                    v, content):
                self.controller.put_content(v)
    elif self.metacaching == 'LCD':
        copied = False
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            # NOTE(review): "v == receiver" caches only at the
            # receiver itself, unlike the "v != receiver" used by the
            # other LCD implementations in this file — looks
            # deliberate (see the original debug print below) but
            # worth confirming.
            if not copied and v == receiver and self.view.has_cache(v):
                self.controller.put_content(v)
                copied = True
                # print ("pushed content to node :%d when receiver is :%d") % (v,receiver)
    else:
        raise ValueError('Metacaching policy %s not supported'
                         % self.metacaching)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Popularity-decay on-path caching: every 120 s the cached
    popularity scores are decremented before the request is handled;
    caches queried on the way up refresh their recency, and each node
    on the delivery path re-evaluates its local popularity threshold.
    """
    # Apply the pending popularity decay, at most once per 120 s.
    elapsed = time - self.clock
    if elapsed >= 120:
        self.controller.decrement(elapsed * self.dec_per_sec, time)
        self.clock = time
    origin = self.view.content_source(content)
    route = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log)
    hit_node = None
    for src, dst in path_links(route):
        self.controller.forward_request_hop(src, dst)
        if self.view.has_cache(dst):
            self.controller.cache_recent_update(dst, time)
            if self.controller.get_content(dst):
                hit_node = dst
                break
    if hit_node is None:
        # No cache hit anywhere: the walk ended at the origin.
        self.controller.get_content(dst)
        hit_node = dst
    back = list(reversed(self.view.shortest_path(receiver, hit_node)))
    for src, dst in path_links(back):
        self.controller.forward_content_hop(src, dst)
        self.controller.check_local_p(dst)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Popularity-decay on-path caching (60 s decay period).  On the
    delivery path, each upstream node checks its neighbours against
    the popularity threshold instead of its own local score.
    """
    # Run the periodic popularity decay, at most once per 60 s.
    idle = time - self.clock
    if idle >= 60:
        self.controller.decrement(idle * self.dec_per_sec, time)
        self.clock = time
    origin = self.view.content_source(content)
    up_route = self.view.shortest_path(receiver, origin)
    self.controller.start_session(time, receiver, content, log)
    server = None
    for src, dst in path_links(up_route):
        self.controller.forward_request_hop(src, dst)
        if self.view.has_cache(dst):
            self.controller.cache_recent_update(dst, time)
            if self.controller.get_content(dst):
                server = dst
                break
    if server is None:
        # Every cache missed; fetch from the node the walk ended at.
        self.controller.get_content(dst)
        server = dst
    down_route = list(reversed(self.view.shortest_path(receiver, server)))
    for src, dst in path_links(down_route):
        self.controller.forward_content_hop(src, dst)
        # Threshold check runs on the upstream endpoint of each hop.
        self.controller.check_neighbours_threshold(src)
    self.controller.end_session()
def process_event(self, time, receiver, content, log, weight):
    """Edge caching: only the first cache on the path toward the
    source is queried; on a miss the content is fetched from the
    source and inserted at that edge cache.
    """
    # get all required data
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log, weight)
    edge_cache = None
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            edge_cache = v
            if self.controller.get_content(v):
                serving_node = v
            else:
                # Cache miss, get content from source
                self.controller.forward_request_path(v, source)
                self.controller.get_content(source)
                serving_node = source
            break
    else:
        # No caches on the path at all, get it from source
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    self.controller.forward_content_path(serving_node, receiver, path)
    # FIX: guard against paths with no cache at all — previously
    # serving_node == source with edge_cache still None led to
    # put_content(None).
    if serving_node == source and edge_cache is not None:
        self.controller.put_content(edge_cache)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Edge caching (unweighted variant): query only the first cache
    toward the source; on a miss fetch from the source and insert the
    content at that edge cache.
    """
    # get all required data
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    edge_cache = None
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            edge_cache = v
            if self.controller.get_content(v):
                serving_node = v
            else:
                # Cache miss, get content from source
                self.controller.forward_request_path(v, source)
                self.controller.get_content(source)
                serving_node = source
            break
    else:
        # No caches on the path at all, get it from source
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    self.controller.forward_content_path(serving_node, receiver, path)
    # FIX: when the path contains no cache, edge_cache is still None;
    # the original then called put_content(None).
    if serving_node == source and edge_cache is not None:
        self.controller.put_content(edge_cache)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Nearest-replica routing using the precomputed ``self.distance``
    matrix; only the 'ideal' implementation is functional, both
    'approx' variants are unfinished and raise NotImplementedError.
    Return-path caching follows the configured meta-caching policy.
    """
    # get all required data
    locations = self.view.content_locations(content)
    nearest_replica = min(locations,
                          key=lambda x: self.distance[receiver][x])
    # Route request to nearest replica
    self.controller.start_session(time, receiver, content, log)
    if self.implementation == 'ideal':
        self.controller.forward_request_path(receiver, nearest_replica)
    elif self.implementation == 'approx_1':
        # Floods actual request packets
        paths = {loc: len(self.view.shortest_path(receiver, loc)[:self.radius])
                 for loc in locations}
        # TODO: Continue
        raise NotImplementedError("Not implemented")
    elif self.implementation == 'approx_2':
        # Floods meta-request packets
        # TODO: Continue
        raise NotImplementedError("Not implemented")
    else:
        # Should never reach this block anyway
        raise ValueError("Implementation %s not supported"
                         % str(self.implementation))
    self.controller.get_content(nearest_replica)
    # Now we need to return packet and we have options
    path = list(reversed(self.view.shortest_path(receiver, nearest_replica)))
    if self.metacaching == 'LCE':
        # Cache at every on-path cache that does not already hold it.
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if self.view.has_cache(v) and not self.view.cache_lookup(v, content):
                self.controller.put_content(v)
    elif self.metacaching == 'LCD':
        # Cache a single copy at the first cache below the replica.
        copied = False
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if not copied and v != receiver and self.view.has_cache(v):
                self.controller.put_content(v)
                copied = True
    else:
        raise ValueError('Metacaching policy %s not supported'
                         % self.metacaching)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """pCASTING: on the delivery path each cache stores the content
    with probability 0.5*(1 - occupancy) + 0.5*(residual lifetime).

    Assumes *content* is a string of the form
    ``/agent/.../<lastWrite_ms>/<nextWrite_ms>`` — the last two path
    segments are parsed as timestamps (TODO confirm against the
    workload).
    """
    # get all required data
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source.  FIX: this fallback
        # is now a for-else; previously it also ran after a cache hit,
        # fetching (and counting) the content twice at the serving
        # node.
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if v != receiver and self.view.has_cache(v):
            # actual pCASTING logic
            cache_size = self.cache_size[v]
            dump = self.view.cache_dump(v)
            occupied = len(dump)
            assert occupied <= cache_size
            # NOTE(review): on Python 2 this is integer division and
            # oc would mostly be 0 — confirm the target interpreter.
            oc = occupied / cache_size  # cache occupancy
            # content must look like this:
            # /agent1/movement1/movement/1520034756231/1520044926135
            lastWrite = float(content.split('/')[-2])
            nextWrite = float(content.split('/')[-1])  # could be inf
            f = nextWrite - lastWrite
            time = float(time)
            fr = 1 - ((time - lastWrite) / f)  # 0 <= residual lifetime <= 1
            probability = 0.5 * (1 - oc) + 0.5 * fr
            assert probability <= 1
            if random.random() < probability:
                self.controller.put_content(v)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Leave Copy Everywhere: fetch from the first on-path cache that
    holds the content (or the source) and insert a copy at every cache
    on the return path.
    """
    # get all required data
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source.  FIX: made this a
        # for-else; previously it also executed after a cache hit,
        # double-counting the hit at the serving node.
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if self.view.has_cache(v):
            # insert content
            self.controller.put_content(v)
    self.controller.end_session()
def forward_content_path(self, u, v, path=None, main_path=True):
    """Forward a content from node *u* to node *v* over the provided path.

    Parameters
    ----------
    u : any hashable type
        Origin node
    v : any hashable type
        Destination node
    path : list, optional
        The path to use. If not provided, shortest path is used
    main_path : bool, optional
        If *True*, indicates that this path is being traversed by
        content that will be delivered to the receiver. This is
        needed to calculate latency correctly in multicast cases.
        Default value is *True*
    """
    if path is None:
        path = self.model.shortest_path[u][v]
    for hop_u, hop_v in path_links(path):
        # FIX: propagate main_path to each hop — the original dropped
        # it, so off-main-path (multicast) hops were accounted as if
        # they were on the main delivery path, unlike the sibling
        # forward_request_path/forward_content_path implementations.
        self.forward_content_hop(hop_u, hop_v, main_path)
def forward_request_path(self, s, t, path=None, main_path=True):
    """Forward a request from node *s* to node *t* along *path*.

    Parameters
    ----------
    s : any hashable type
        Origin node
    t : any hashable type
        Destination node
    path : list, optional
        Nodes to traverse; defaults to the shortest path from *s* to *t*
    main_path : bool, optional
        If *True*, indicates that link path is on the main path that
        will lead to hit a content. It is normally used to calculate
        latency correctly in multicast cases. Default value is *True*
    """
    hops = self.model.shortest_path[s][t] if path is None else path
    for hop_src, hop_dst in path_links(hops):
        self.forward_request_hop(hop_src, hop_dst, main_path)
def forward_content_path(self, u, v, path=None, main_path=True):
    """Forward a content from node *u* to node *v* along *path*.

    Parameters
    ----------
    u : any hashable type
        Origin node
    v : any hashable type
        Destination node
    path : list, optional
        Nodes to traverse; defaults to the shortest path from *u* to *v*
    main_path : bool, optional
        If *True*, indicates that this path is being traversed by
        content that will be delivered to the receiver. This is
        needed to calculate latency correctly in multicast cases.
        Default value is *True*
    """
    hops = self.model.shortest_path[u][v] if path is None else path
    for hop_src, hop_dst in path_links(hops):
        self.forward_content_hop(hop_src, hop_dst, main_path)
def process_event(self, time, receiver, content, log):
    """ProbCache-style on-path caching: each cache on the delivery
    path stores the content with a probability weighted by the
    remaining downstream cache capacity; this variant stops after the
    first successful insertion (``copied`` flag).
    """
    # get all required data
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    # x counts caches passed so far, c is the total number of caches
    # on the delivery path; together they bias insertion toward the
    # receiver side of the path.
    copied = False
    x = 0.0
    c = len([v for v in path if self.view.has_cache(v)])
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        # N: aggregate cache capacity from the current hop downstream.
        N = sum([
            self.cache_size[n] for n in path[hop - 1:]
            if n in self.cache_size
        ])
        if v in self.cache_size:
            x += 1
        self.controller.forward_content_hop(u, v)
        if not copied and v != receiver and self.view.has_cache(v):
            # NOTE(review): resembles the ProbCache TimesIn/CacheWeight
            # formula with t_tw as the time window — confirm against
            # the strategy's reference before relying on semantics.
            prob_cache = float(N) / (self.t_tw * self.cache_size[v]) * (x / c)**c
            if random.random() < prob_cache:
                self.controller.put_content(v)
                copied = True
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """ProbCache-style on-path caching (duplicate of the formatted
    variant above in this file, differing only in layout): caches on
    the delivery path insert probabilistically, weighted by downstream
    cache capacity, stopping after the first insertion.
    """
    # get all required data
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # Route requests to original source and queries caches on the path
    self.controller.start_session(time, receiver, content, log)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    # x = caches passed so far; c = total caches on the delivery path.
    copied = False
    x = 0.0
    c = len([v for v in path if self.view.has_cache(v)])
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        # Aggregate cache capacity from this hop toward the receiver.
        N = sum([self.cache_size[n] for n in path[hop - 1:]
                 if n in self.cache_size])
        if v in self.cache_size:
            x += 1
        self.controller.forward_content_hop(u, v)
        if not copied and v != receiver and self.view.has_cache(v):
            # NOTE(review): appears to be the ProbCache weighting with
            # t_tw as the time window — confirm against the reference.
            prob_cache = float(N) / (self.t_tw * self.cache_size[v]) * (x / c)**c
            if random.random() < prob_cache:
                self.controller.put_content(v)
                copied = True
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Handle a request event under clustered hash-routing.

    The request is forwarded cluster-by-cluster to the authoritative cache
    of each traversed cluster (inter-cluster routing: LCE or EDGE); the
    content is returned according to the intra-cluster routing scheme
    (SYMM, ASYMM or MULTICAST), placing copies at the authoritative caches
    encountered.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    # get all required data
    source = self.view.content_source(content)
    # handle (and log if required) actual request
    self.controller.start_session(time, receiver, content, log)
    receiver_cluster = self.view.cluster(receiver)
    source_cluster = self.view.cluster(source)
    # Sequence of clusters between receiver's and source's clusters
    cluster_path = self.cluster_sp[receiver_cluster][source_cluster]
    if self.inter_routing == 'LCE':
        start = receiver
        for cluster in cluster_path:
            cache = self.authoritative_cache(content, cluster)
            # Forward request to authoritative cache
            self.controller.forward_request_path(start, cache)
            start = cache
            if self.controller.get_content(cache):
                break
        else:
            # Loop was never broken, cache miss
            self.controller.forward_request_path(start, source)
            start = source
            if not self.controller.get_content(source):
                raise RuntimeError(
                    'The content is not found the expected source')
    elif self.inter_routing == 'EDGE':
        # Only the authoritative cache of the receiver's cluster is queried
        cache = self.authoritative_cache(content, receiver_cluster)
        self.controller.forward_request_path(receiver, cache)
        if self.controller.get_content(cache):
            self.controller.forward_content_path(cache, receiver)
            self.controller.end_session()
            return
        else:
            self.controller.forward_request_path(cache, source)
            self.controller.get_content(source)
            cluster = source_cluster
            start = source
    # Now "start" is the node that is serving the content
    cluster_path = list(
        reversed(self.cluster_sp[receiver_cluster][cluster]))
    if self.inter_routing == 'LCE':
        if self.intra_routing == 'SYMM':
            # Content retraces the chain of authoritative caches
            for cluster in cluster_path:
                cache = self.authoritative_cache(content, cluster)
                # Forward request to authoritative cache
                self.controller.forward_content_path(start, cache)
                self.controller.put_content(cache)
                start = cache
            self.controller.forward_content_path(start, receiver)
        elif self.intra_routing == 'ASYMM':
            # Content takes the direct path; only authoritative caches that
            # happen to lie on it store a copy
            self.controller.forward_content_path(start, receiver)
            path = self.view.shortest_path(start, receiver)
            traversed_clusters = set(self.view.cluster(v) for v in path)
            authoritative_caches = set(
                self.authoritative_cache(content, cluster)
                for cluster in traversed_clusters)
            traversed_caches = authoritative_caches.intersection(set(path))
            for v in traversed_caches:
                self.controller.put_content(v)
        elif self.intra_routing == 'MULTICAST':
            # Multicast a copy to every authoritative cache on the cluster path
            destinations = [
                self.authoritative_cache(content, cluster)
                for cluster in cluster_path
            ]
            for v in destinations:
                self.controller.put_content(v)
            main_path = set(
                path_links(self.view.shortest_path(start, receiver)))
            mcast_tree = multicast_tree(
                self.view.all_pairs_shortest_paths(), start, destinations)
            mcast_tree = mcast_tree.difference(main_path)
            for u, v in mcast_tree:
                self.controller.forward_content_hop(u, v, main_path=False)
            for u, v in main_path:
                self.controller.forward_content_hop(u, v, main_path=True)
        else:
            raise ValueError("Intra-cluster routing %s not supported"
                             % self.intra_routing)
    elif self.inter_routing == 'EDGE':
        if self.intra_routing == 'SYMM':
            # cluster_path is reversed, so [-1] is the receiver's cluster
            cache = self.authoritative_cache(content, cluster_path[-1])
            self.controller.forward_content_path(start, cache)
            self.controller.forward_content_path(cache, receiver)
            path = self.view.shortest_path(start, receiver)
            traversed_clusters = set(self.view.cluster(v) for v in path)
            authoritative_caches = set(
                self.authoritative_cache(content, cluster)
                for cluster in traversed_clusters)
            traversed_caches = authoritative_caches.intersection(set(path))
            for v in traversed_caches:
                self.controller.put_content(v)
            if cache not in traversed_caches:
                self.controller.put_content(cache)
        elif self.intra_routing == 'ASYMM':
            self.controller.forward_content_path(start, receiver)
            path = self.view.shortest_path(start, receiver)
            traversed_clusters = set(self.view.cluster(v) for v in path)
            authoritative_caches = set(
                self.authoritative_cache(content, cluster)
                for cluster in traversed_clusters)
            traversed_caches = authoritative_caches.intersection(set(path))
            for v in traversed_caches:
                self.controller.put_content(v)
        elif self.intra_routing == 'MULTICAST':
            cache = self.authoritative_cache(content, cluster_path[-1])
            self.controller.put_content(cache)
            main_path = set(
                path_links(self.view.shortest_path(start, receiver)))
            mcast_tree = multicast_tree(
                self.view.all_pairs_shortest_paths(), start, [cache])
            mcast_tree = mcast_tree.difference(main_path)
            for u, v in mcast_tree:
                self.controller.forward_content_hop(u, v, main_path=False)
            for u, v in main_path:
                self.controller.forward_content_hop(u, v, main_path=True)
        else:
            raise ValueError("Inter-cluster routing %s not supported"
                             % self.inter_routing)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Handle a request under hash-routing with opportunistic local caches.

    The request travels towards the content's authoritative cache, checking
    the opportunistic ("local") caches on the way. On a miss at the
    authoritative cache it continues towards the source. The content is
    then delivered back — directly if it was found before reaching the
    authoritative cache, otherwise via the configured routing scheme
    (SYMM, ASYMM or MULTICAST) — storing copies in the authoritative cache
    and the local caches traversed.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    # get all required data
    source = self.view.content_source(content)
    cache = self.authoritative_cache(content)
    # handle (and log if required) actual request
    self.controller.start_session(time, receiver, content, log)
    # Forward request to authoritative cache and check all local caches on path
    path = self.view.shortest_path(receiver, cache)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if v != cache:
            if self.controller.get_content_local_cache(v):
                serving_node = v
                direct_return = True
                break
    else:
        # No cache hits from local caches on path, query authoritative cache
        if self.controller.get_content(cache):
            serving_node = v
            direct_return = True
        else:
            path = self.view.shortest_path(cache, source)
            for u, v in path_links(path):
                self.controller.forward_request_hop(u, v)
                if v != source:
                    if self.controller.get_content_local_cache(v):
                        serving_node = v
                        direct_return = False
                        break
            else:
                # No hits from local caches in cache -> source path
                # Get content from the source
                self.controller.get_content(source)
                serving_node = source
                direct_return = False
    # Now we have a serving node, let's return the content, while storing
    # it on all opportunistic caches on the path
    if direct_return:
        # Here I just need to return the content directly to the user
        path = list(
            reversed(self.view.shortest_path(receiver, serving_node)))
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if v != receiver:
                self.controller.put_content_local_cache(v)
        self.controller.end_session()
        return
    # Here I need to see whether I need symm, asymm or multicast delivery
    if self.routing == 'SYMM':
        # Content goes serving node -> authoritative cache -> receiver
        links = path_links(list(reversed(self.view.shortest_path(cache, serving_node)))) + \
                path_links(list(reversed(self.view.shortest_path(receiver, cache))))
        for u, v in links:
            self.controller.forward_content_hop(u, v)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
    elif self.routing == 'ASYMM':
        # Content goes straight to the receiver; the authoritative cache
        # stores a copy only if it lies on that path
        path = list(
            reversed(self.view.shortest_path(receiver, serving_node)))
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
    elif self.routing == 'MULTICAST':
        # Deliver over a multicast tree spanning receiver and cache
        main_path = set(
            path_links(self.view.shortest_path(serving_node, receiver)))
        mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(),
                                    serving_node, [receiver, cache])
        cache_branch = mcast_tree.difference(main_path)
        for u, v in cache_branch:
            self.controller.forward_content_hop(u, v, main_path=False)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
        for u, v in main_path:
            self.controller.forward_content_hop(u, v, main_path=True)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
    else:
        raise ValueError("Routing %s not supported" % self.routing)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request with popularity-aware cache admission.

    While the request travels towards the source, at most one on-path
    cache is marked as the admission candidate: a full cache is marked
    only if the requested content outranks something it already stores,
    an empty/non-full cache is marked unconditionally. Ranking uses raw
    content-ID ordering when ``use_global_popularity`` is set, otherwise
    a per-node request counter (``content_popularity_index``). On the
    return path the content is inserted only at the marked node.

    Fix: restored the ``for``/``else`` around the source fetch in both
    branches (matching the pattern used by the other strategies in this
    file) so the source fetch runs only when no cache hit occurred.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    compareTag = True  # True until an admission candidate has been chosen
    cacheNode = []     # node(s) designated to store the content on return
    self.controller.start_session(time, receiver, content, log)
    if self.use_global_popularity:
        for u, v in path_links(path):
            self.controller.forward_request_hop(u, v)
            if self.view.has_cache(v):
                if self.controller.get_content(v):
                    serving_node = v
                    break
                elif self.view.is_cache_full(v):
                    # Full cache: admit only if the new content outranks a
                    # cached one (lower content ID == more popular --
                    # TODO confirm this ordering convention)
                    if compareTag:
                        for cache in self.view.cache_dump(v):
                            if content < cache:
                                cacheNode.append(v)
                                compareTag = False
                                break
                elif compareTag:
                    # Cache has free space: admit unconditionally
                    cacheNode.append(v)
                    compareTag = False
        else:
            # No cache hits, get content from source
            self.controller.get_content(v)
            serving_node = v
        # Return content, inserting only at the designated node
        path = list(
            reversed(self.view.shortest_path(receiver, serving_node)))
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if self.view.has_cache(v):
                # insert content
                if v in cacheNode:
                    self.controller.put_content(v)
    else:
        for u, v in path_links(path):
            self.controller.forward_request_hop(u, v)
            if self.view.has_cache(v):
                # Update this node's popularity counter for the content
                if content in self.content_popularity_index[v]:
                    self.content_popularity_index[v][content] += 1
                else:
                    self.content_popularity_index[v][content] = 1
                if self.controller.get_content(v):
                    serving_node = v
                    break
                elif self.view.is_cache_full(v):
                    if compareTag:
                        contentIndex = self.content_popularity_index[v][content]
                        # NOTE(review): assumes every cached item has a
                        # counter entry; a never-indexed cached item would
                        # raise KeyError -- confirm
                        for cache in self.view.cache_dump(v):
                            if contentIndex > self.content_popularity_index[v][cache]:
                                cacheNode.append(v)
                                compareTag = False
                                break
                elif compareTag:
                    cacheNode.append(v)
                    compareTag = False
        else:
            # No cache hits, get content from source
            self.controller.get_content(v)
            serving_node = v
        # Return content, inserting only at the designated node
        path = list(
            reversed(self.view.shortest_path(receiver, serving_node)))
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if self.view.has_cache(v):
                # insert content
                if v in cacheNode:
                    self.controller.put_content(v)
    self.controller.end_session()
def compute_clusters(topology, k, distance='delay', nbunch=None, n_iter=10):
    """Cluster nodes of a topology as to minimize the intra-cluster latency.

    This function assumes that every link is labelled with latencies and
    performs clustering using the k-medoids method with the PAM algorithm.

    Parameters
    ----------
    topology : Topology
        The topology
    k : int
        The number of clusters
    distance : str, optional
        The link metric used to represent distance between nodes.
        If None, hop count is used instead
    nbunch : iterable, optional
        If given, only the subgraph induced by these nodes is clustered
    n_iter : int, optional
        The number of iterations

    Return
    ------
    clusters: list of sets
        List of clusters (each cluster being a set of nodes)
    """
    topology = topology.to_undirected()
    if nx.number_connected_components(topology) > 1:
        raise ValueError('The topology has more than one connected component')
    if nbunch is not None:
        topology = topology.subgraph(nbunch)
    # Relabel nodes 0..n-1 so they can index the distance matrix; original
    # labels are preserved in the 'label' node attribute
    topology = nx.convert_node_labels_to_integers(topology,
                                                  label_attribute='label')
    if distance is not None:
        for u, v in topology.edges():
            if distance not in topology.adj[u][v]:
                raise ValueError('Edge (%s, %s) does not have a %s attribute'
                                 % (str(topology.node[u]['label']),
                                    str(topology.node[v]['label']),
                                    distance))
    n = topology.number_of_nodes()
    path = dict(nx.all_pairs_shortest_path(topology))
    # Symmetric pairwise distance matrix fed to the PAM solver
    distances = np.zeros((n, n))
    for u in path:
        for v in path[u]:
            # Note: need to do something about weights and asymmetric paths!
            if u == v or distances[u][v] != 0:
                continue
            # Extract all edges of a path
            edges = path_links(path[u][v])
            if distance is not None:
                # NB: u, v inside the generator are local to it and do not
                # clobber the outer loop variables
                distances[u][v] = distances[v][u] = sum(
                    topology.adj[u][v][distance] for u, v in edges)
            else:
                distances[u][v] = distances[v][u] = len(edges)
    clusters = [set() for _ in range(k)]
    # pam returns, per node, the medoid (node index) it is assigned to
    medoid_assignment = pam(distances, k=k, n_iter=n_iter)[0]
    if any(medoid_assignment >= n):
        raise ValueError('Something is wrong with k-medoids algorithm. '
                         'I got an assignment to a medoid that does not exist')
    medoids = list(set(medoid_assignment))
    medoid_cluster_map = {medoids[i]: i for i in range(len(medoids))}
    # Convert assignments from medoid ID to cluster ID
    for v in range(n):
        clusters[medoid_cluster_map[medoid_assignment[v]]].add(
            topology.node[v]['label'])
    return clusters
def process_event(self, time, receiver, content, log):
    """Handle a request under hash-routing with opportunistic local caches.

    The request travels towards the content's authoritative cache, checking
    the opportunistic ("local") caches on the way. On a miss at the
    authoritative cache it continues towards the source. The content is
    then delivered back — directly if it was found before reaching the
    authoritative cache, otherwise via the configured routing scheme
    (SYMM, ASYMM or MULTICAST) — storing copies in the authoritative cache
    and the local caches traversed.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    # get all required data
    source = self.view.content_source(content)
    cache = self.authoritative_cache(content)
    # handle (and log if required) actual request
    self.controller.start_session(time, receiver, content, log)
    # Forward request to authoritative cache and check all local caches on path
    path = self.view.shortest_path(receiver, cache)
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if v != cache:
            if self.controller.get_content_local_cache(v):
                serving_node = v
                direct_return = True
                break
    else:
        # No cache hits from local caches on path, query authoritative cache
        if self.controller.get_content(cache):
            serving_node = v
            direct_return = True
        else:
            path = self.view.shortest_path(cache, source)
            for u, v in path_links(path):
                self.controller.forward_request_hop(u, v)
                if v != source:
                    if self.controller.get_content_local_cache(v):
                        serving_node = v
                        direct_return = False
                        break
            else:
                # No hits from local caches in cache -> source path
                # Get content from the source
                self.controller.get_content(source)
                serving_node = source
                direct_return = False
    # Now we have a serving node, let's return the content, while storing
    # it on all opportunistic caches on the path
    if direct_return:
        # Here I just need to return the content directly to the user
        path = list(reversed(self.view.shortest_path(receiver, serving_node)))
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if v != receiver:
                self.controller.put_content_local_cache(v)
        self.controller.end_session()
        return
    # Here I need to see whether I need symm, asymm or multicast delivery
    if self.routing == 'SYMM':
        # Content goes serving node -> authoritative cache -> receiver
        links = path_links(list(reversed(self.view.shortest_path(cache, serving_node)))) + \
                path_links(list(reversed(self.view.shortest_path(receiver, cache))))
        for u, v in links:
            self.controller.forward_content_hop(u, v)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
    elif self.routing == 'ASYMM':
        # Content goes straight to the receiver; the authoritative cache
        # stores a copy only if it lies on that path
        path = list(reversed(self.view.shortest_path(receiver, serving_node)))
        for u, v in path_links(path):
            self.controller.forward_content_hop(u, v)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
    elif self.routing == 'MULTICAST':
        # Deliver over a multicast tree spanning receiver and cache
        main_path = set(path_links(self.view.shortest_path(serving_node, receiver)))
        mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(),
                                    serving_node, [receiver, cache])
        cache_branch = mcast_tree.difference(main_path)
        for u, v in cache_branch:
            self.controller.forward_content_hop(u, v, main_path=False)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
        for u, v in main_path:
            self.controller.forward_content_hop(u, v, main_path=True)
            if v == cache:
                self.controller.put_content(v)
            else:
                self.controller.put_content_local_cache(v)
    else:
        raise ValueError("Routing %s not supported" % self.routing)
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Handle a request event under clustered hash-routing.

    The request is forwarded cluster-by-cluster to the authoritative cache
    of each traversed cluster (inter-cluster routing: LCE or EDGE); the
    content is returned according to the intra-cluster routing scheme
    (SYMM, ASYMM or MULTICAST), placing copies at the authoritative caches
    encountered.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    # get all required data
    source = self.view.content_source(content)
    # handle (and log if required) actual request
    self.controller.start_session(time, receiver, content, log)
    receiver_cluster = self.view.cluster(receiver)
    source_cluster = self.view.cluster(source)
    # Sequence of clusters between receiver's and source's clusters
    cluster_path = self.cluster_sp[receiver_cluster][source_cluster]
    if self.inter_routing == 'LCE':
        start = receiver
        for cluster in cluster_path:
            cache = self.authoritative_cache(content, cluster)
            # Forward request to authoritative cache
            self.controller.forward_request_path(start, cache)
            start = cache
            if self.controller.get_content(cache):
                break
        else:
            # Loop was never broken, cache miss
            self.controller.forward_request_path(start, source)
            start = source
            if not self.controller.get_content(source):
                raise RuntimeError('The content is not found the expected source')
    elif self.inter_routing == 'EDGE':
        # Only the authoritative cache of the receiver's cluster is queried
        cache = self.authoritative_cache(content, receiver_cluster)
        self.controller.forward_request_path(receiver, cache)
        if self.controller.get_content(cache):
            self.controller.forward_content_path(cache, receiver)
            self.controller.end_session()
            return
        else:
            self.controller.forward_request_path(cache, source)
            self.controller.get_content(source)
            cluster = source_cluster
            start = source
    # Now "start" is the node that is serving the content
    cluster_path = list(reversed(self.cluster_sp[receiver_cluster][cluster]))
    if self.inter_routing == 'LCE':
        if self.intra_routing == 'SYMM':
            # Content retraces the chain of authoritative caches
            for cluster in cluster_path:
                cache = self.authoritative_cache(content, cluster)
                # Forward request to authoritative cache
                self.controller.forward_content_path(start, cache)
                self.controller.put_content(cache)
                start = cache
            self.controller.forward_content_path(start, receiver)
        elif self.intra_routing == 'ASYMM':
            # Content takes the direct path; only authoritative caches that
            # happen to lie on it store a copy
            self.controller.forward_content_path(start, receiver)
            path = self.view.shortest_path(start, receiver)
            traversed_clusters = set(self.view.cluster(v) for v in path)
            authoritative_caches = set(self.authoritative_cache(content, cluster)
                                       for cluster in traversed_clusters)
            traversed_caches = authoritative_caches.intersection(set(path))
            for v in traversed_caches:
                self.controller.put_content(v)
        elif self.intra_routing == 'MULTICAST':
            # Multicast a copy to every authoritative cache on the cluster path
            destinations = [self.authoritative_cache(content, cluster)
                            for cluster in cluster_path]
            for v in destinations:
                self.controller.put_content(v)
            main_path = set(path_links(self.view.shortest_path(start, receiver)))
            mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(),
                                        start, destinations)
            mcast_tree = mcast_tree.difference(main_path)
            for u, v in mcast_tree:
                self.controller.forward_content_hop(u, v, main_path=False)
            for u, v in main_path:
                self.controller.forward_content_hop(u, v, main_path=True)
        else:
            raise ValueError("Intra-cluster routing %s not supported"
                             % self.intra_routing)
    elif self.inter_routing == 'EDGE':
        if self.intra_routing == 'SYMM':
            # cluster_path is reversed, so [-1] is the receiver's cluster
            cache = self.authoritative_cache(content, cluster_path[-1])
            self.controller.forward_content_path(start, cache)
            self.controller.forward_content_path(cache, receiver)
            path = self.view.shortest_path(start, receiver)
            traversed_clusters = set(self.view.cluster(v) for v in path)
            authoritative_caches = set(self.authoritative_cache(content, cluster)
                                       for cluster in traversed_clusters)
            traversed_caches = authoritative_caches.intersection(set(path))
            for v in traversed_caches:
                self.controller.put_content(v)
            if cache not in traversed_caches:
                self.controller.put_content(cache)
        elif self.intra_routing == 'ASYMM':
            self.controller.forward_content_path(start, receiver)
            path = self.view.shortest_path(start, receiver)
            traversed_clusters = set(self.view.cluster(v) for v in path)
            authoritative_caches = set(self.authoritative_cache(content, cluster)
                                       for cluster in traversed_clusters)
            traversed_caches = authoritative_caches.intersection(set(path))
            for v in traversed_caches:
                self.controller.put_content(v)
        elif self.intra_routing == 'MULTICAST':
            cache = self.authoritative_cache(content, cluster_path[-1])
            self.controller.put_content(cache)
            main_path = set(path_links(self.view.shortest_path(start, receiver)))
            mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(),
                                        start, [cache])
            mcast_tree = mcast_tree.difference(main_path)
            for u, v in mcast_tree:
                self.controller.forward_content_hop(u, v, main_path=False)
            for u, v in main_path:
                self.controller.forward_content_hop(u, v, main_path=True)
        else:
            raise ValueError("Inter-cluster routing %s not supported"
                             % self.inter_routing)
    self.controller.end_session()
def hashrouting_model(topology, routing, hit_ratio, source_content_ratio,
                      req_rates, paths=None):
    """Compute overall latency of hashrouting over an arbitrary topology

    Parameters
    ----------
    topology : Topology
        The topology
    routing : str ('SYMM | 'MULTICAST')
        Content routing strategy
    hit_ratio : float
        Average cache hit ratio of the system of hash-routed caches
    source_content_ratio : dict
        Ratio of contents that each source serve
    req_rates : dict
        Rate of requests for each requester
    paths : dict of dicts, optional
        Network paths

    Returns
    -------
    latency : float
        The average content retrieval latency

    References
    ----------
    .. [1] L. Saino, I. Psaras and G. Pavlou, Framework and Algorithms for
           Operator-managed Content Caching, in IEEE Transactions on Network
           and Service Management (TNSM), Volume 17, Issue 1, March 2020
           https://doi.org/10.1109/TNSM.2019.2956525
    .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis
           University College London, Dec. 2015. Available:
           http://discovery.ucl.ac.uk/1473436/
    """
    if routing not in ("SYMM", "MULTICAST"):
        raise ValueError("Routing {} not supported".format(routing))
    # source_content_ratio must be a probability distribution
    if math.fabs(sum(source_content_ratio.values()) - 1) > 0.0001:
        raise ValueError("The sum of source_content_ratio values must be 1")
    if paths is None:
        paths = dict(nx.all_pairs_dijkstra_path(topology))
    # Pre-compute pairwise path latencies from per-link "delay" attributes
    latencies = {}
    for u in paths:
        latencies[u] = {}
        for v in paths[u]:
            links = path_links(paths[u][v])
            latencies[u][v] = sum(topology.edges[i, j]["delay"]
                                  for i, j in links)
    # Get all caching nodes
    caches = topology.cache_nodes()
    overall_req_rate = sum(req_rates.values())
    # Normalize request rates into a probability distribution
    req_ratios = {k: v / overall_req_rate for k, v in req_rates.items()}
    # Calculate overall latency
    if routing == "SYMM":
        # This is the latency component between receivers and caches
        # (requests are spread uniformly over the hash-routed caches)
        latency = (1 / len(caches)) * sum(
            rate * (latencies[recv][cache] + latencies[cache][recv])
            for recv, rate in req_ratios.items() for cache in caches)
        # This is the latency component between caches and sources
        # (incurred only on a miss, hence the 1 - hit_ratio factor)
        latency += ((1 - hit_ratio) / len(caches)) * sum(
            ratio * (latencies[cache][source] + latencies[source][cache])
            for cache in caches
            for source, ratio in source_content_ratio.items())
    elif routing == "MULTICAST":
        # Latency leg receiver-cache
        latency = (1 / len(caches)) * sum(
            rate * (latencies[recv][cache])
            for recv, rate in req_ratios.items() for cache in caches)
        # Latency leg cache-receiver (hit case)
        latency += (hit_ratio / len(caches)) * sum(
            rate * (latencies[cache][recv])
            for recv, rate in req_ratios.items() for cache in caches)
        # Latency leg caches-sources (miss case)
        latency += ((1 - hit_ratio) / len(caches)) * sum(
            ratio * (latencies[cache][source])
            for cache in caches
            for source, ratio in source_content_ratio.items())
        # Latency leg sources-receivers (miss case)
        latency += (1 - hit_ratio) * sum(
            source_ratio * req_ratio * (latencies[source][receiver])
            for receiver, req_ratio in req_ratios.items()
            for source, source_ratio in source_content_ratio.items())
    else:
        # Should never reach this block anyway
        raise ValueError("Routing {} not supported".format(routing))
    return latency
def process_event(self, time, receiver, content, log):
    """Handle a request using RSN (cached locator) assisted forwarding.

    The Interest first goes to the first-hop router. If neither a cached
    copy nor a cached locator (RSN entry) is found there, it travels
    towards the closest route service and, on a total miss, on from the
    route service to the content source. The Data then retraces the
    Interest path back to the receiver, caching along the way.

    Fixes applied: converted a Python 2 ``print`` statement to a function
    call; added the missing ``return`` after a first-hop cache hit (the
    session was ended but execution fell through); materialized
    ``reversed(...)`` into a list before ``path_links`` to match every
    other call site in this file.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    source = self.view.content_source(content)
    self.controller.start_session(time, receiver, content, log)
    path = self.view.shortest_path(receiver, source)
    # Interest Forwarding
    router = path[1]
    if path[0] != receiver:
        # Diagnostic: shortest_path should always start at the receiver
        print("Error: path[0]")
        router = path[0]
    self.controller.forward_request_hop(receiver, router)
    if self.controller.get_content(router):
        # Hit at the first-hop router: deliver and stop here
        self.controller.forward_content_hop(router, receiver)
        self.controller.end_session()
        return
    interest_path = [receiver, router]
    cache_hit = False
    locator_hit = False
    locator = self.controller.get_rsn(router)  # RSN get
    self.controller.put_rsn(router, source)  # RSN put
    if locator is None:
        # Forward the packet to a route service
        closest_rs = self.view.closest_rs_source(content, router)
        path = self.view.shortest_path(router, closest_rs)
        for prev_node, curr_node in path_links(path):
            self.controller.forward_request_hop(prev_node, curr_node)
            interest_path.append(curr_node)
            if self.controller.get_content(curr_node):
                cache_hit = True
                break
            locator = self.controller.get_rsn(curr_node)  # RSN get
            if locator is not None:
                # Found a cached locator
                locator_hit = True
                break
        else:
            # Reached the routing service. Now forward the packet to the source.
            path = self.view.shortest_path(closest_rs, source)
            for prev_node, curr_node in path_links(path):
                interest_path.append(curr_node)
                self.controller.forward_request_hop(prev_node, curr_node)
                if self.controller.get_content(curr_node):
                    cache_hit = True
                    break
        if locator_hit:
            # Continue from the locator hit towards the source, refreshing
            # RSN entries on the way
            path = self.view.shortest_path(interest_path[-1], source)
            for prev_node, curr_node in path_links(path):
                interest_path.append(curr_node)
                self.controller.forward_request_hop(prev_node, curr_node)
                if self.controller.get_content(curr_node):
                    cache_hit = True
                    break
                self.controller.put_rsn(curr_node, source)  # RSN put
    elif locator == source:
        # Forward the packet towards the source
        path = self.view.shortest_path(router, source)
        for prev_node, curr_node in path_links(path):
            interest_path.append(curr_node)
            self.controller.forward_request_hop(prev_node, curr_node)
            if self.controller.get_content(curr_node):
                cache_hit = True
                break
            self.controller.put_rsn(curr_node, source)  # RSN put
    else:
        print("Error: Locator is: " + str(locator) + " and source is: " +
              str(source))
        raise ValueError('Incorrect locator is returned')
    # Data Forwarding: retrace the Interest path back to the receiver
    curr_node = interest_path[-1]
    for prev_node, curr_node in path_links(list(reversed(interest_path))):
        self.controller.forward_content_hop(prev_node, curr_node)
        # NOTE(review): put_content() is called with no node argument here,
        # unlike the other strategies -- presumably the controller uses the
        # session state; confirm against the controller API
        self.controller.put_content()
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request preferring copies already downloaded by receivers.

    If the requesting node caches the content itself, serve locally.
    Otherwise pick a serving point among the receivers that already
    downloaded the content (closest by hop count, ties broken at random),
    falling back to the content source; caches along the request path are
    still queried on the way. A random on-path cache stores the content
    on return, and the receiver always keeps a copy.

    Fix: restored the ``downloaded_content_locations`` assignment, which
    had been left commented out while still being used below (NameError).

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    # get all required data
    source = self.view.content_source(content)
    # Receivers that already hold a downloaded copy of this content
    downloaded_content_locations = self.view.content_locations_of_receivers(
        content)
    self.controller.start_session(time, receiver, content, log)
    best_serving_node = ''
    if self.view.has_cache(receiver):
        # checking if the asking node is same as delivering node
        if self.controller.get_content(receiver):
            serving_node = receiver
            self.controller.end_session()
            self.counter_cache_same_receiver = self.counter_cache_same_receiver + 1
            return
    if len(downloaded_content_locations) > 0:  # checks if the list is empty
        if len(downloaded_content_locations) == 1:
            best_serving_node = downloaded_content_locations[0]
        else:
            # Keep the downloaded copies closest to the receiver (hop count)
            lengths_from_rec = [len(self.view.shortest_path(receiver, i))
                                for i in downloaded_content_locations]
            shortest_lengths_index = [
                i for i, mi in enumerate(lengths_from_rec)
                if mi == min(lengths_from_rec)
            ]
            nodes_with_shortest_path = [downloaded_content_locations[i]
                                        for i in shortest_lengths_index]
            if len(nodes_with_shortest_path) == 1:
                best_serving_node = nodes_with_shortest_path[0]
            else:
                # chooses one random receiver node among the closest ones
                best_serving_node = random.choice(nodes_with_shortest_path)
                print('cache retrieved from download')
    else:
        best_serving_node = source
    path = self.view.shortest_path(receiver, best_serving_node)
    # Route requests to either the downloaded content node or the source
    # and query caches on the path
    for u, v in path_links(path):
        self.controller.forward_request_hop(u, v)
        if self.view.has_cache(v):
            if self.controller.get_content(v):
                serving_node = v
                break
    else:
        # No cache hits, get content from source
        self.controller.get_content(v)
        serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    # One random intermediate cache stores a copy
    caches = [v for v in path[1:-1] if self.view.has_cache(v)]
    designated_cache = random.choice(caches) if len(caches) > 0 else None
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
        if v == designated_cache:
            self.controller.put_content(v)
    self.controller.put_content(receiver)  # put content on the receiver
    self.controller.end_session()
def process_event(self, time, receiver, content, log):
    """Serve a request and drive an RL-based cache-admission policy at the RSU.

    The first hop on the receiver->source path is treated as the RSU. After
    delivery, the RSU's learned policy chooses an action for the current
    state; if the action designates this content, replacement candidates
    are installed and the content is (re)inserted. The cache change is
    reverted if the average reward decreases. The model is then trained and
    the observation recorded.

    Fixes applied: ``== None`` replaced with ``is None``; the source fetch
    after the request loop placed in the loop's ``else`` clause (matching
    the other strategies in this file); commented-out dead code removed.

    Parameters
    ----------
    time : float
        Timestamp of the event
    receiver : node
        Node requesting the content
    content : any hashable
        Identifier of the requested content
    log : bool
        Whether the event must be logged by data collectors
    """
    # get all required data
    serving_node = None
    source = self.view.content_source(content)
    path = self.view.shortest_path(receiver, source)
    # First hop after the receiver is treated as the RSU
    # (assumes the path has at least two nodes -- TODO confirm)
    rsu = path[1]
    self.controller.start_session(time, receiver, content, log)
    if self.view.cache_lookup(rsu, content):
        self.controller.get_content(rsu)
        serving_node = rsu
    # Route requests to original source and queries caches on the path
    if serving_node is None:
        for u, v in path_links(path):
            self.controller.forward_request_hop(u, v)
            if self.view.has_cache(v):
                if self.controller.get_content(v):
                    serving_node = v
                    break
        else:
            # No cache hits, get content from source
            self.controller.get_content(v)
            serving_node = v
    # Return content
    path = list(reversed(self.view.shortest_path(receiver, serving_node)))
    for u, v in path_links(path):
        self.controller.forward_content_hop(u, v)
    self.controller.put_content(rsu)
    # RL admission step at the RSU
    state = self.controller.get_state(rsu, content)
    action = self.controller.get_best_action(rsu, state)
    action_converted = self.controller.convert_action(action)
    # action_converted appears to be a per-content indicator vector with
    # 1-indexed content IDs -- TODO confirm
    if action_converted[content - 1]:
        policy = [
            n + 1 for n, p in enumerate(action_converted)
            if p and n + 1 != content
        ]
        self.controller.set_replacement_candidates(rsu, policy)
        self.controller.put_content(rsu)
        reward_after = self.controller.get_avg_reward()
        if reward_after < self.old_reward:
            # New policy performed worse: undo the cache change
            self.controller.revert_back_cache(rsu)
        else:
            self.old_reward = reward_after
    self.controller.train_model(rsu, state)
    self.controller.save_observation(rsu, state, action,
                                     self.controller.get_reward(rsu))
    self.controller.end_session()
def compute_clusters(topology, k, distance='delay', nbunch=None, n_iter=10):
    """Cluster nodes of a topology as to minimize the intra-cluster latency.

    This function assumes that every link is labelled with latencies and
    performs clustering using the k-medoids method with the PAM algorithm.

    Parameters
    ----------
    topology : Topology
        The topology
    k : int
        The number of clusters
    distance : str, optional
        The link metric used to represent distance between nodes.
        If None, hop count is used instead
    nbunch : iterable, optional
        If given, only the subgraph induced by these nodes is clustered
    n_iter : int, optional
        The number of iterations

    Return
    ------
    clusters: list of sets
        List of clusters (each cluster being a set of nodes)
    """
    topology = topology.to_undirected()
    if nx.number_connected_components(topology) > 1:
        raise ValueError('The topology has more than one connected component')
    if nbunch is not None:
        topology = topology.subgraph(nbunch)
    # Relabel nodes 0..n-1 so they can index the distance matrix; original
    # labels are preserved in the 'label' node attribute
    topology = nx.convert_node_labels_to_integers(topology,
                                                  label_attribute='label')
    if distance is not None:
        for u, v in topology.edges():
            if distance not in topology.adj[u][v]:
                raise ValueError('Edge (%s, %s) does not have a %s attribute'
                                 % (str(topology.node[u]['label']),
                                    str(topology.node[v]['label']),
                                    distance))
    n = topology.number_of_nodes()
    path = dict(nx.all_pairs_shortest_path(topology))
    # Symmetric pairwise distance matrix fed to the PAM solver
    distances = np.zeros((n, n))
    for u in path:
        for v in path[u]:
            # Note: need to do something about weights and asymmetric paths!
            if u == v or distances[u][v] != 0:
                continue
            # Extract all edges of a path
            edges = path_links(path[u][v])
            if distance is not None:
                # NB: u, v inside the generator are local to it and do not
                # clobber the outer loop variables
                distances[u][v] = distances[v][u] = sum(
                    topology.adj[u][v][distance] for u, v in edges)
            else:
                distances[u][v] = distances[v][u] = len(edges)
    clusters = [set() for _ in range(k)]
    # pam returns, per node, the medoid (node index) it is assigned to
    medoid_assignment = pam(distances, k=k, n_iter=n_iter)[0]
    if any(medoid_assignment >= n):
        raise ValueError('Something is wrong with k-medoids algorithm. '
                         'I got an assignment to a medoid that does not exist')
    medoids = list(set(medoid_assignment))
    medoid_cluster_map = {medoids[i]: i for i in range(len(medoids))}
    # Convert assignments from medoid ID to cluster ID
    for v in range(n):
        clusters[medoid_cluster_map[medoid_assignment[v]]].add(
            topology.node[v]['label'])
    return clusters