def test_capacity_set(self):
    for size in SIZES:
        l = LRU(size)
        for i in range(size + 5):
            l[i] = str(i)
        l.set_size(size + 10)
        self.assertTrue(size + 10 == l.get_size())
        self.assertTrue(len(l) == size)
        for i in range(size + 20):
            l[i] = str(i)
        self.assertTrue(len(l) == size + 10)
        l.set_size(size + 10 - 1)
        self.assertTrue(len(l) == size + 10 - 1)
def test_callback(self):
    counter = [0]

    first_key = 'a'
    first_value = 1

    def callback(key, value):
        self.assertEqual(key, first_key)
        self.assertEqual(value, first_value)
        counter[0] += 1

    l = LRU(1, callback=callback)
    l[first_key] = first_value
    l['b'] = 1                  # test calling the callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['b'])

    l['b'] = 2                  # doesn't call callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['b'])
    self.assertEqual(l.values(), [2])

    l = LRU(1, callback=callback)
    l[first_key] = first_value
    l.set_callback(None)
    l['c'] = 1                  # doesn't call callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['c'])

    l.set_callback(callback)
    del l['c']                  # doesn't call callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), [])

    l = LRU(2, callback=callback)
    l['a'] = 1                  # test calling the callback
    l['b'] = 2                  # test calling the callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['b', 'a'])

    l.set_size(1)
    self.assertEqual(counter[0], 2)     # callback invoked
    self.assertEqual(l.keys(), ['b'])
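# A minimal sketch of the eviction-callback behaviour exercised by the test
# above (assumes `from lru import LRU`; the key/value names are illustrative).
from lru import LRU

def evicted(key, value):
    print("evicted:", key, value)

cache = LRU(2, callback=evicted)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3            # capacity exceeded -> evicted('a', 1) is called
cache.set_callback(None)  # detach the callback; later evictions stay silent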
class GameState(object, metaclass=Singleton):
    def __init__(self):
        self.is_finished = False
        self.active_player = None
        self.other_players = LRU(10)
        self.world_map = LRU(1000)

    @classmethod
    def get_instance(cls):
        return GameState()

    @guard_exception(False)
    @log_exception
    def parse_net_frame(self, net_frame):
        # parse current player
        ap = PlayerState.from_dict(net_frame["active_player"])
        if self.active_player is None:
            self.active_player = ap
        else:
            self.active_player.update(ap)

        # parse other AOI players
        ops = net_frame["other_aoi_players"]
        if len(ops) > self.other_players.get_size():
            self.other_players.set_size(len(ops))
        for op in ops:
            op = PlayerState.from_dict(op)
            if op.name not in self.other_players:
                self.other_players[op.name] = op
            else:
                self.other_players[op.name].update(op)

        # parse world map elements
        wm = net_frame["world_map"]
        if len(wm) > self.world_map.get_size():
            self.world_map.set_size(len(wm))
        for item in wm:
            x, y, val = item
            self.world_map[(x, y)] = val  # store into the LRU, not the raw frame list
        return True

    def get_update(self, net_frame):
        ret = self.parse_net_frame(net_frame)
        if ret:
            return self
l[3]                      # Accessing an item makes it MRU
print(l.items())          # Would print [(3, '3'), (5, '5'), (4, '4'), (2, '2'), (1, '1')]
                          # Now 3 is in front

l.keys()                  # Can get keys alone in MRU order
                          # Would return [3, 5, 4, 2, 1]

del l[4]                  # Delete an item
print(l.items())          # Would print [(3, '3'), (5, '5'), (2, '2'), (1, '1')]

print(l.get_size())       # Would print 5
l.set_size(3)
print(l.items())          # Would print [(3, '3'), (5, '5'), (2, '2')]
print(l.get_size())       # Would print 3
print(l.has_key(5))       # Would print True
print(2 in l)             # Would print True

l.get_stats()             # Would return (1, 0)

l.update({5: '0'})        # Update an item
print(l.items())          # Would print [(5, '0'), (3, '3'), (2, '2')]
class Cache:
    """Class representing D3N."""

    # Replacement policies
    LRU = "LRU"
    LFU = "LFU"
    LRU_S = "LRU_S"
    FIFO = "FIFO"
    RAND = "RAND"

    # Write policies
    WRITE_BACK = "WB"
    WRITE_THROUGH = "WT"

    # Layer
    L1 = "L1"
    L2 = "L2"

    # Hashing schemes
    consistent = "consistent"
    rendezvous = "rendezvous"
    rr = "rr"

    def __init__(self, layer, size, replace_pol, write_pol, hash_ring,
                 hash_type, obj_size, full_size, logger):
        self._replace_pol = replace_pol  # Replacement policy
        self._write_pol = write_pol      # Write policy
        self._layer = layer              # Layer info
        self._size = size                # Cache size
        self.spaceLeft = size            # Remaining cache space
        self._logger = logger
        self.hashmap = {}                # Mapping of cached keys to object sizes
        self.hash_ring = hash_ring
        self._hash_type = hash_type
        self._obj_size = obj_size

        if self._size == 0:
            self.zerosize = True
            self._size = 1
        else:
            self.zerosize = False

        if self._replace_pol == Cache.LRU:
            self.cache = LRU(self._size)
        elif self._replace_pol == Cache.FIFO:
            self.cache = deque()
        elif self._replace_pol == Cache.LRU_S:
            self.cache = LRU(self._size)
            self.shadow = LRU(full_size)
            self.hist = []
            for i in range(full_size):
                self.hist.append(0)

        # Statistics
        self._hit_count = 0
        self._miss_count = 0
        self._backend_bw = 0
        self._crossrack_bw = 0
        self._intrarack_bw = 0
        self.miss_lat = 0
        self.lat_count = 0

    def _insert1(self, key, size):
        # No eviction
        if not self.zerosize:
            if self._replace_pol == Cache.LRU_S:
                self.shadow[key] = 1
            if int(size) <= self.spaceLeft:
                if self._replace_pol == Cache.LRU:
                    self.cache[key] = int(size)
                elif self._replace_pol == Cache.LRU_S:
                    self.cache[key] = int(size)
                elif self._replace_pol == Cache.FIFO:
                    self.cache.append(key)
                self.hashmap[key] = int(size)
                self.spaceLeft -= int(size)
            else:
                while int(size) > self.spaceLeft:
                    self._evict()
                if self._replace_pol == Cache.LRU:
                    self.cache[key] = int(size)
                elif self._replace_pol == Cache.LRU_S:
                    self.cache[key] = int(size)
                elif self._replace_pol == Cache.FIFO:
                    self.cache.append(key)
                self.hashmap[key] = int(size)
                self.spaceLeft -= int(size)

    def _insert(self, key, size):
        # No eviction
        if not self.zerosize:
            if self._replace_pol == Cache.LRU_S:
                self.cache[key] = int(size)
                self.shadow[key] = int(size)
            elif self._replace_pol == Cache.LRU:
                self.cache[key] = int(size)
            else:
                if int(size) <= self.spaceLeft:
                    if self._replace_pol == Cache.LRU:
                        self.cache[key] = int(size)
                    elif self._replace_pol == Cache.LRU_S:
                        self.cache[key] = int(size)
                    elif self._replace_pol == Cache.FIFO:
                        self.cache.append(key)
                    self.hashmap[key] = int(size)
                    self.spaceLeft -= int(size)
                else:
                    while int(size) > self.spaceLeft:
                        self._evict()
                    if self._replace_pol == Cache.LRU:
                        self.cache[key] = int(size)
                    elif self._replace_pol == Cache.LRU_S:
                        self.cache[key] = int(size)
                    elif self._replace_pol == Cache.FIFO:
                        self.cache.append(key)
                    self.hashmap[key] = int(size)
                    self.spaceLeft -= int(size)

    def read1(self, key, size):
        """Read an object from the cache."""
        if self._layer == "BE":
            return 1
        if self.zerosize:
            return None
        r = None
        if self._replace_pol == Cache.LRU_S:
            if self.shadow.has_key(key):
                count = 0
                for i in self.shadow.keys():
                    if i == key:
                        self.hist[count] += 1
                        break
                    count += 1
            self.shadow[key] = 1
        if key in self.hashmap:
            if self._replace_pol == Cache.LRU:
                self._update_use(key)
            elif self._replace_pol == Cache.LRU_S:
                self._update_use(key)
            self._hit_count += 1
            r = 1
        else:
            self._miss_count += 1
        return r

    def read(self, key, size):
        """Read an object from the cache."""
        if self._layer == "BE":
            return 1
        if self.zerosize:
            return None
        r = None
        if self._replace_pol == Cache.LRU_S:
            if self.cache.has_key(key):
                self._hit_count += 1
                self.cache[key] = self.cache[key]  # re-insert to refresh recency
                r = 1
            else:
                self._miss_count += 1
                if self.shadow.has_key(key):
                    count = 0
                    for i in self.shadow.keys():
                        if i == key:
                            self.hist[count] += 1
                            break
                        count += 1
                self.shadow[key] = 1
        else:
            if key in self.hashmap:
                if self._replace_pol == Cache.LRU:
                    self._update_use(key)
                elif self._replace_pol == Cache.LRU_S:
                    self._update_use(key)
                self._hit_count += 1
                r = 1
            else:
                self._miss_count += 1
        return r

    def checkKey(self, key):
        """Check whether an object is present in the cache."""
        if self._layer == "BE":
            return 1
        if self.zerosize:
            return 0
        r = 0
        if self._replace_pol == Cache.LRU_S or self._replace_pol == Cache.LRU:
            if self.cache.has_key(key):
                r = 1
            else:
                r = 0
        return r

    def _evict(self):
        if self._replace_pol == Cache.LRU:
            id = self.cache.peek_last_item()[0]
            del self.cache[id]
        elif self._replace_pol == Cache.LRU_S:
            id = self.cache.peek_last_item()[0]
            del self.cache[id]
        elif self._replace_pol == Cache.FIFO:
            id = self.cache.popleft()
        self.spaceLeft += int(self.hashmap[id])
        del self.hashmap[id]

    def _update_use(self, key):
        """Update the recency of a cached object."""
        if self._replace_pol == Cache.LRU:
            self.cache[key] = self.hashmap[key]
        if self._replace_pol == Cache.LRU_S:
            self.cache[key] = self.hashmap[key]

    def set_cache_size(self, size):
        new_size = self.cache.get_size() + int(size)
        self.cache.set_size(int(new_size))

    def set_backend_bw(self, value):
        self._backend_bw += value

    def set_crossrack_bw(self, value):
        self._crossrack_bw += value

    def set_intrarack_bw(self, value):
        self._intrarack_bw += value

    def get_backend_bw(self):
        return self._backend_bw

    def get_crossrack_bw(self):
        return self._crossrack_bw

    def get_intrarack_bw(self):
        return self._intrarack_bw

    def get_replace_pol(self):
        return self._replace_pol

    def get_hit_count(self):
        return self._hit_count

    def get_miss_count(self):
        return self._miss_count

    def get_available_space(self):
        return self.spaceLeft

    def get_replace_poll(self):
        return self._replace_pol

    def reset_shadow_cache(self):
        self.shadow.clear()

    def print_cache(self):
        print(self.cache)

    def get_l2_address(self, key):
        if self._hash_type == Cache.consistent:
            return self.hash_ring.get_node(key)
        elif self._hash_type == Cache.rendezvous:
            return self.hash_ring.find_node(key)
        elif self._hash_type == Cache.rr:
            val = key.split("_")[1]
            res = int(val) % int(self.hash_ring)
            return res
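# A minimal, hypothetical driver for the Cache above; the parameter values,
# the object key, and the choice of _insert1/read1 are illustrative assumptions only.
cache = Cache(layer=Cache.L1, size=1024, replace_pol=Cache.LRU,
              write_pol=Cache.WRITE_THROUGH, hash_ring=None,
              hash_type=Cache.consistent, obj_size=4, full_size=2048,
              logger=None)
cache._insert1("obj_1", 4)                             # admit a 4-unit object
print(cache.read1("obj_1", 4))                         # 1 (hit), recency refreshed
print(cache.read1("obj_2", 4))                         # None (miss)
print(cache.get_hit_count(), cache.get_miss_count())   # 1 1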
class Streamer:
    """Streamer for flows management."""
    num_streamers = 0

    def __init__(self, source=None, capacity=128000, active_timeout=120,
                 inactive_timeout=60, user_metrics=None, user_classifiers=None,
                 enable_ndpi=True):
        Streamer.num_streamers += 1
        self.__exports = []
        self.source = source
        self.__flows = LRU(capacity, callback=emergency_callback)  # LRU cache
        self._capacity = self.__flows.get_size()  # Streamer capacity (default: 128000)
        self.active_timeout = active_timeout      # expiration active timeout
        self.inactive_timeout = inactive_timeout  # expiration inactive timeout
        self.current_flows = 0                    # counter for stored flows
        self.flows_number = 0
        self.current_tick = 0                     # current timestamp
        self.processed_packets = 0                # processed packet counter
        # Python dictionaries to hold current and archived flow records
        self.flow_cache = OrderedDict()
        self.user_classifiers = {}
        if user_classifiers is not None:
            try:
                classifier_iterator = iter(user_classifiers)
                for classifier in classifier_iterator:
                    if isinstance(classifier, NFStreamClassifier):
                        self.user_classifiers[classifier.name] = classifier
            except TypeError:
                self.user_classifiers[user_classifiers.name] = user_classifiers
        self.user_metrics = {}
        if enable_ndpi:
            ndpi_classifier = NDPIClassifier('ndpi')
            self.user_classifiers[ndpi_classifier.name] = ndpi_classifier
        if user_metrics is not None:
            self.user_metrics = user_metrics

    def _get_capacity(self):
        """Getter for the capacity attribute."""
        return self.__flows.get_size()

    def _set_capacity(self, new_size):
        """Setter for the capacity attribute."""
        return self.__flows.set_size(new_size)

    capacity = property(_get_capacity, _set_capacity)

    def terminate(self):
        """Terminate all entries in the Streamer."""
        remaining_flows = True
        while remaining_flows:
            try:
                key, value = self.__flows.peek_last_item()
                value.export_reason = 2
                self.exporter(value)
            except TypeError:
                remaining_flows = False
        for classifier_name, classifier in self.user_classifiers.items():
            self.user_classifiers[classifier_name].on_exit()

    def exporter(self, flow):
        """Export method for a flow.
        trigger_type: 0 (inactive), 1 (active), 2 (flush)
        """
        # Look for the flow in the created classifiers
        for classifier_name, classifier in self.user_classifiers.items():
            # Terminate the flow in the respective classifiers
            self.user_classifiers[classifier_name].on_flow_terminate(flow)
        # Delete the flow register from the active flows collection
        del self.__flows[flow.key]
        # Decrease the number of active flows by 1
        self.current_flows -= 1
        # Add the expired flow register to the final flows collection
        self.__exports.append(flow)

    def inactive_watcher(self):
        """Inactive expiration management."""
        remaining_inactives = True
        # While there are inactive flow registers
        while remaining_inactives:
            try:
                # Obtain the last (Least Recently Used) flow register and its key
                key, value = self.__flows.peek_last_item()
                # Has the flow exceeded the inactive timeout (1 minute)?
                if (self.current_tick - value.end_time) >= (self.inactive_timeout * 1000):
                    # Set export reason to 0 (inactive) in the flow
                    value.export_reason = 0
                    # Export the flow to the final flows collection
                    self.exporter(value)
                # There are no flows that can be declared inactive yet
                else:
                    # Stop the inactive watcher until it is called again
                    remaining_inactives = False
            except TypeError:
                remaining_inactives = False

    def consume(self, pkt_info):
        """Consume a packet and update Streamer status."""
        self.processed_packets += 1  # increment total processed packet counter
        # Obtain a flow hash key for identification of the flow
        key = get_flow_key(pkt_info)
        print("\nCONSUMING PACKET FROM FLOW:", key)
        # Is this packet from a registered flow?
        if key in self.__flows:
            print("FLOW FOUND - UPDATING STATISTICS")
            # Check the current status of the flow that the packet belongs to:
            # -1 active flow - 0 inactive flow - 1 active flow timeout expired -
            # 2 flush remaining flows in LRU - 3 FIN flag detected - 4 RST flag detected
            flow_status = self.__flows[key].update_and_check_flow_status(
                pkt_info, self.active_timeout, self.user_classifiers,
                self.user_metrics)
            # Has the active timeout of the flow register expired (2 minutes)?
            if flow_status == 1:
                # Export the old flow register to the final collection and terminate
                # this flow process on the specified classifier
                self.exporter(self.__flows[key])
                # Create a new flow register for the current packet
                flow = Flow(pkt_info, self.user_classifiers, self.user_metrics,
                            self.flow_cache)
                # Add the new flow to the active flows collection using the same hash key
                self.__flows[flow.key] = flow
                # Recreate the entry on the flow_cache with the flow key
                del self.flow_cache[flow.key]
                self.flow_cache[flow.key] = {}
                # Update the flow status on the collection
                flow.create_new_flow_record(pkt_info, self.user_classifiers,
                                            self.user_metrics)
            if flow_status == 3:
                # FIN flag detected in both directions - exporting flow
                self.exporter(self.__flows[key])
            if flow_status == 4:
                # RST flag found - updating bidirectional statistics - exporting flow
                self.exporter(self.__flows[key])
            if flow_status == 5:
                # FIN flag timer expired
                self.exporter(self.__flows[key])
                print("****FLOW EXPORTED")
                """
                expired_flow = self.__flows[key]
                print("****STARTING TCP TIMER")
                threading.Timer(20, self.export_incomplete_flow(expired_flow))
                """
        # This packet belongs to a new flow
        else:
            # Increase the count of current active flows
            print("FLOW NOT FOUND - CREATING NEW FLOW REGISTER")
            # Update flow counters
            self.current_flows += 1
            self.flows_number += 1
            # Create the new flow object
            flow = Flow(pkt_info, self.user_classifiers, self.user_metrics,
                        self.flow_cache)
            # Add this new flow register to the LRU
            self.__flows[flow.key] = flow
            # Create the entry on the flow_cache with the flow key
            self.flow_cache[flow.key] = {}
            # Create the new bidirectional flow record
            flow.create_new_flow_record(pkt_info, self.user_classifiers,
                                        self.user_metrics)
            # Set the current start time on the streamer timer to keep control of the inactive flows
            self.current_tick = flow.start_time
        # Remove the Least Recently Used (LRU) flow record from the active flows collection
        # and export it to the final flows collection if its inactive timeout has been exceeded
        self.inactive_watcher()
        print("*******************PACKET CONSUMED - MOVING TO NEXT*********************************")

    """
    def export_incomplete_flow(self, expired_flow):
        print("##############################---TCP TIMER EXPIRED--#######################")
        # Look for the flow in the created classifiers
        self.flows_number += 1
        for classifier_name, classifier in self.user_classifiers.items():
            # Terminate the flow in the respective classifiers
            self.user_classifiers[classifier_name].on_flow_terminate(expired_flow)
        self.__exports.append(expired_flow)
        print("##############################---EXPIRED FLOW EXPORTED-----###############################")
    """

    def __iter__(self):
        # Create the packet information generator
        pkt_info_gen = Observer(source=self.source)
        # Extract each packet's information from the network interface or pcap file
        for pkt_info in pkt_info_gen:
            if pkt_info is not None:
                # Check if the packet belongs to an existing flow or create a new one
                self.consume(pkt_info)
                for export in self.__exports:
                    yield export
                self.__exports = []
        # Terminate the streamer and flush the remaining flows
        self.terminate()
        for export in self.__exports:
            yield export
        self.__exports = []
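# A minimal usage sketch for the Streamer above (assumes the surrounding
# module's imports; the pcap path is a hypothetical placeholder).
streamer = Streamer(source="traffic.pcap", capacity=64000,
                    active_timeout=120, inactive_timeout=60)
for flow in streamer:  # iterating drives consume() and the inactive watcher
    print(flow.key, flow.export_reason)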