class NCacheController(object):

    def __init__(self, sw_name):
        self.topo = Topology(db="./topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.setup()

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION, self.cpu_port)

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            print str(host_mac) + str(port)
            self.controller.table_add("l2_forward", "set_egress_port", [str(host_mac)], [str(port)])

    def main(self):
        self.set_forwarding_table()
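A minimal way to launch this controller from the command line; the flag name below is illustrative and not taken from the original code.

# Hypothetical entry point (assumed, not part of the original listing)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--sw-name", default="s1", help="name of the P4 switch to manage")
    args = parser.parse_args()
    NCacheController(args.sw_name).main()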
class RoutingController(object):

    def __init__(self):
        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.init()

    def init(self):
        self.connect_to_switches()
        self.reset_states()
        self.set_table_defaults()

    def reset_states(self):
        [controller.reset_state() for controller in self.controllers.values()]

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def set_table_defaults(self):
        for controller in self.controllers.values():
            controller.table_set_default("ipv4_lpm", "drop", [])
            controller.table_set_default("ecmp_group_to_nhop", "drop", [])

    def add_mirroring_ids(self):
        for sw_name, controller in self.controllers.items():
            controller.mirroring_add(100, 1)

    def set_egress_type_table(self):
        for sw_name, controller in self.controllers.items():
            for intf, node in self.topo.get_interfaces_to_node(sw_name).items():
                node_type = self.topo.get_node_type(node)
                port_number = self.topo.interface_to_port(sw_name, intf)
                if node_type == 'host':
                    node_type_num = 1
                elif node_type == 'switch':
                    node_type_num = 2
                print "table_add at {}:".format(sw_name)
                self.controllers[sw_name].table_add("egress_type", "set_egress_type", [str(port_number)], [str(node_type_num)])

    def route(self):
        switch_ecmp_groups = {sw_name: {} for sw_name in self.topo.get_p4switches().keys()}

        for sw_name, controller in self.controllers.items():
            for sw_dst in self.topo.get_p4switches():

                # if its ourselves we create direct connections
                if sw_name == sw_dst:
                    for host in self.topo.get_hosts_connected_to(sw_name):
                        sw_port = self.topo.node_to_node_port_num(sw_name, host)
                        host_ip = self.topo.get_host_ip(host) + "/32"
                        host_mac = self.topo.get_host_mac(host)

                        # add rule
                        print "table_add at {}:".format(sw_name)
                        self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(host_mac), str(sw_port)])

                # check if there are directly connected hosts
                else:
                    if self.topo.get_hosts_connected_to(sw_dst):
                        paths = self.topo.get_shortest_paths_between_nodes(sw_name, sw_dst)
                        for host in self.topo.get_hosts_connected_to(sw_dst):

                            if len(paths) == 1:
                                next_hop = paths[0][1]
                                host_ip = self.topo.get_host_ip(host) + "/24"
                                sw_port = self.topo.node_to_node_port_num(sw_name, next_hop)
                                dst_sw_mac = self.topo.node_to_node_mac(next_hop, sw_name)

                                # add rule
                                print "table_add at {}:".format(sw_name)
                                self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(dst_sw_mac), str(sw_port)])

                            elif len(paths) > 1:
                                next_hops = [x[1] for x in paths]
                                dst_macs_ports = [(self.topo.node_to_node_mac(next_hop, sw_name),
                                                   self.topo.node_to_node_port_num(sw_name, next_hop))
                                                  for next_hop in next_hops]
                                host_ip = self.topo.get_host_ip(host) + "/24"

                                # check if the ecmp group already exists. The ecmp group is defined by the number of next
                                # ports used, thus we can use dst_macs_ports as key
                                if switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None):
                                    ecmp_group_id = switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None)
                                    print "table_add at {}:".format(sw_name)
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(ecmp_group_id), str(len(dst_macs_ports))])

                                # new ecmp group for this switch
                                else:
                                    new_ecmp_group_id = len(switch_ecmp_groups[sw_name]) + 1
                                    switch_ecmp_groups[sw_name][tuple(dst_macs_ports)] = new_ecmp_group_id

                                    # add group
                                    for i, (mac, port) in enumerate(dst_macs_ports):
                                        print "table_add at {}:".format(sw_name)
                                        self.controllers[sw_name].table_add("ecmp_group_to_nhop", "set_nhop", [str(new_ecmp_group_id), str(i)], [str(mac), str(port)])

                                    # add forwarding rule
                                    print "table_add at {}:".format(sw_name)
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(new_ecmp_group_id), str(len(dst_macs_ports))])

    def main(self):
        self.set_egress_type_table()
        self.add_mirroring_ids()
        self.route()
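A minimal launcher for the controller above (assumed, not part of the original listing):

if __name__ == "__main__":
    controller = RoutingController()
    controller.main()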
class RoutingController(object):

    def __init__(self):
        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.init()

    def init(self):
        self.connect_to_switches()
        self.reset_states()
        self.set_table_defaults()

    def reset_states(self):
        [controller.reset_state() for controller in self.controllers.values()]

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def set_table_defaults(self):
        for controller in self.controllers.values():
            controller.table_set_default("ipv4_lpm", "drop", [])
            controller.table_set_default("ecmp_group_to_nhop", "drop", [])

    def get_conn_host_infos(self, p4switch):
        # returns the ip/mac/port of the first host connected to p4switch, if any
        connected_hosts = self.topo.get_hosts_connected_to(p4switch)
        if connected_hosts:
            host = connected_hosts[0]
            switch_infos = self.topo.node(p4switch)
            host_mac = self.topo.get_host_mac(host)
            host_ip = self.topo.get_host_ip(host) + '/32'
            output_iface = self.topo.interface_to_port(p4switch, switch_infos[host]['intf'])
            return host_ip, host_mac, output_iface
        else:
            return None, None, None

    def add_ecmp_group(self, p4switch, ss_api, neigh, paths, ecmp_group):
        host_ip, host_mac, output_iface = self.get_conn_host_infos(neigh)
        if host_ip:
            next_hops = [path[1] for path in paths]
            dst_macs_ports = [(self.topo.node_to_node_mac(next_hop, p4switch),
                               self.topo.node_to_node_port_num(p4switch, next_hop))
                              for next_hop in next_hops]

            # reuse the group id if this set of next hops was already installed on
            # the switch, otherwise allocate a new one and add its nhop entries
            groups = ecmp_group.setdefault(p4switch, {})
            group_id = groups.get(tuple(dst_macs_ports))
            if group_id is None:
                group_id = len(groups) + 1
                groups[tuple(dst_macs_ports)] = group_id
                index = 0
                for dst_mac, dst_port in dst_macs_ports:
                    ss_api.table_add('ecmp_group_to_nhop', 'set_nhop',
                                     [str(group_id), str(index)],
                                     [dst_mac, str(dst_port)])
                    index = index + 1

            print('Adding multipath entries')
            ss_api.table_add('ipv4_lpm', 'ecmp_group', [host_ip],
                             [str(group_id), str(len(next_hops))])
        return None

    def add_route_via_best(self, p4switch, ss_api, neigh, paths):
        host_ip, host_mac, output_iface = self.get_conn_host_infos(neigh)
        if host_ip:
            # route towards the first hop of the (single) shortest path
            next_hop = paths[0][1]
            next_hop_mac = self.topo.node_to_node_mac(next_hop, p4switch)
            output_iface = self.topo.node_to_node_port_num(p4switch, next_hop)
            print('Add route via best', host_ip, next_hop_mac, output_iface)
            ss_api.table_add('ipv4_lpm', 'set_nhop', [host_ip], [next_hop_mac, str(output_iface)])

    def add_directly_conn_host(self, p4switch, ss_api):
        host_ip, host_mac, output_iface = self.get_conn_host_infos(p4switch)
        if host_ip:
            print('Add directly connected route ', host_ip, host_mac, output_iface)
            ss_api.table_add('ipv4_lpm', 'set_nhop', [host_ip], [host_mac, str(output_iface)])

    def route(self):
        ecmp_group = {}
        for p4switch, ss_api in self.controllers.items():
            for neigh in self.topo.get_p4switches():
                if p4switch == neigh:
                    # Check if we have connected hosts
                    self.add_directly_conn_host(p4switch, ss_api)
                else:
                    shortest_path = self.topo.get_shortest_paths_between_nodes(p4switch, neigh)
                    if len(shortest_path) < 2:
                        # There is only 1 path
                        self.add_route_via_best(p4switch, ss_api, neigh, shortest_path)
                    else:
                        # multipath
                        self.add_ecmp_group(p4switch, ss_api, neigh, shortest_path, ecmp_group)

    def main(self):
        self.route()
class RoutingController(object):

    def __init__(self):
        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.init()

    def init(self):
        self.connect_to_switches()
        self.reset_states()
        self.set_table_defaults()

        ''' OPTIONS FOR DEMO '''
        self.apply_src_priority = True
        self.apply_dst_priority = False
        self.src_high_priority = 'h1'
        self.src_low_priority = 'h4'
        self.dst_high_priority = 'h5'
        self.dst_low_priority = 'h8'

    def reset_states(self):
        [controller.reset_state() for controller in self.controllers.values()]

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def set_table_defaults(self):
        for controller in self.controllers.values():
            controller.table_set_default("ipv4_lpm", "drop", [])
            controller.table_set_default("ecmp_group_to_nhop", "drop", [])

    def set_tables(self):
        # From project 6: function outside of route() that sets the egress type table
        # loops through all switches
        for sw_name, controller in self.controllers.items():
            # gets the interface and node type
            for interface, node in self.topo.get_interfaces_to_node(sw_name).items():
                node_type = self.topo.get_node_type(node)
                port_number = self.topo.interface_to_port(sw_name, interface)

                # enumerates the node types to be put in the table
                if node_type == 'host':
                    node_type_num = 1

                    # NEW - code to set priority based on the host name
                    host_ip = self.topo.get_host_ip(node) + "/24"
                    priority_num = 2
                    if str(node) == self.src_high_priority and self.apply_src_priority:
                        priority_num = 1
                    elif str(node) == self.src_low_priority and self.apply_src_priority:
                        priority_num = 3
                    elif str(node) == self.dst_high_priority and self.apply_dst_priority:
                        priority_num = 1
                    elif str(node) == self.dst_low_priority and self.apply_dst_priority:
                        priority_num = 3

                    print "Node name: {}, ip address: {}, priority: {}".format(str(node), str(host_ip), str(priority_num))
                    self.controllers[sw_name].table_add("priority_type", "set_priority", [str(host_ip)], [str(priority_num)])
                    if self.apply_dst_priority:
                        self.controllers[sw_name].table_add("priority_type_dst", "set_priority", [str(host_ip)], [str(priority_num)])

                elif node_type == 'switch':
                    node_type_num = 2

                # fills the table
                self.controllers[sw_name].table_add("egress_type", "set_type", [str(port_number)], [str(node_type_num)])

    def add_mirroring_ids(self):
        for sw_name, controller in self.controllers.items():
            # mirror session 100 is sent out of port 1 (the first argument is the session id)
            controller.mirroring_add(100, 1)

    def route(self):
        switch_ecmp_groups = {sw_name: {} for sw_name in self.topo.get_p4switches().keys()}

        for sw_name, controller in self.controllers.items():
            for sw_dst in self.topo.get_p4switches():

                # if its ourselves we create direct connections
                if sw_name == sw_dst:
                    for host in self.topo.get_hosts_connected_to(sw_name):
                        sw_port = self.topo.node_to_node_port_num(sw_name, host)
                        host_ip = self.topo.get_host_ip(host) + "/32"
                        host_mac = self.topo.get_host_mac(host)

                        # add rule
                        print "table_add at {}:".format(sw_name)
                        self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(host_mac), str(sw_port)])

                # check if there are directly connected hosts
                else:
                    if self.topo.get_hosts_connected_to(sw_dst):
                        paths = self.topo.get_shortest_paths_between_nodes(sw_name, sw_dst)
                        for host in self.topo.get_hosts_connected_to(sw_dst):

                            if len(paths) == 1:
                                next_hop = paths[0][1]
                                host_ip = self.topo.get_host_ip(host) + "/24"
                                sw_port = self.topo.node_to_node_port_num(sw_name, next_hop)
                                dst_sw_mac = self.topo.node_to_node_mac(next_hop, sw_name)

                                # add rule
                                print "table_add at {}:".format(sw_name)
                                self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(dst_sw_mac), str(sw_port)])

                            elif len(paths) > 1:
                                next_hops = [x[1] for x in paths]
                                dst_macs_ports = [(self.topo.node_to_node_mac(next_hop, sw_name),
                                                   self.topo.node_to_node_port_num(sw_name, next_hop))
                                                  for next_hop in next_hops]
                                host_ip = self.topo.get_host_ip(host) + "/24"

                                # check if the ecmp group already exists. The ecmp group is defined by the number of next
                                # ports used, thus we can use dst_macs_ports as key
                                if switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None):
                                    ecmp_group_id = switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None)
                                    print "table_add at {}:".format(sw_name)
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(ecmp_group_id), str(len(dst_macs_ports))])

                                # new ecmp group for this switch
                                else:
                                    new_ecmp_group_id = len(switch_ecmp_groups[sw_name]) + 1
                                    switch_ecmp_groups[sw_name][tuple(dst_macs_ports)] = new_ecmp_group_id

                                    # add group
                                    for i, (mac, port) in enumerate(dst_macs_ports):
                                        print "table_add at {}:".format(sw_name)
                                        self.controllers[sw_name].table_add("ecmp_group_to_nhop", "set_nhop", [str(new_ecmp_group_id), str(i)], [str(mac), str(port)])

                                    # add forwarding rule
                                    print "table_add at {}:".format(sw_name)
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(new_ecmp_group_id), str(len(dst_macs_ports))])

    def main(self):
        self.set_tables()
        self.add_mirroring_ids()
        self.route()
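A short, assumed launcher for the demo controller above; the option flips shown in the comments are illustrative only.

if __name__ == "__main__":
    controller = RoutingController()
    # the demo knobs can be flipped after init(), e.g. to prioritise destinations instead:
    # controller.apply_src_priority = False
    # controller.apply_dst_priority = True
    controller.main()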
class GenFault(object):

    def __init__(self, program):
        if program == "f":
            self.topo = Topology(db="../p4src_flowsize/topology.db")  # set the topology
        elif program == "i":
            self.topo = Topology(db="../p4src_interval/topology.db")  # set the topology
        self.controllers = {}  # the switches
        self.init()

    def init(self):
        self.connect_to_switches()

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def loop(self):
        switches = raw_input("type the switch names to generate a loop, separated by ','\n"
                             "they must be physically loop-able:\n").split(',')
        IPs = []
        for sw_name in self.controllers.keys():
            for host in self.topo.get_hosts_connected_to(sw_name):
                host_ip = self.topo.get_host_ip(host) + "/24"
                IPs.append(host_ip)

        for i in range(len(switches)):
            sw_name = switches[i]
            self.controllers[sw_name].table_clear("ecmp_group_to_nhop")
            self.controllers[sw_name].table_clear("ipv4_lpm")
            if i == len(switches) - 1:
                next_hop = switches[0]
            else:
                next_hop = switches[i + 1]
            sw_port = self.topo.node_to_node_port_num(sw_name, next_hop)
            dst_sw_mac = self.topo.node_to_node_mac(next_hop, sw_name)
            # print "table_add at {}:".format(sw_name)
            for host_ip in IPs:
                self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)],
                                                    [str(dst_sw_mac), str(sw_port)])

    def blackhole(self, args):
        if args.sw_name is None:
            print "Not implemented yet, please specify the switch name"
        else:
            self.controllers[args.sw_name].table_clear("ecmp_group_to_nhop")
            self.controllers[args.sw_name].table_clear("ipv4_lpm")
            print args.sw_name, "has been shut down"

    def remove_cpu(self):
        # log = open("./router.log", "w")
        # log.write(str(self.topo))
        print(1)
        print(self.topo.get_shortest_paths_between_nodes("s5", "h2"))
        # print(self.topo["sw-cpu"])
        # print(self.topo.network_graph["sw-cpu"])
        self.topo.network_graph.remove_node("sw-cpu")
        # self.topo.save("../p4src_interval/topology.db")
        # self.topo.load("../p4src_interval/topology.db")
        # del self.topo
        # self.topo = Topology(db="../p4src_interval/topology.db")
        print("\n\n\n\n\n")
        print(2)
        print(self.topo.get_shortest_paths_between_nodes("h1", "h8"))
        # print(self.topo["sw-cpu"])
        # print(self.topo.network_graph["sw-cpu"])
        # log = open("./router1.log", "w")
        # log.write(str(self.topo))

    def reroute(self):
        # log = open("./router.log", "w")
        # log.write(str(self.topo))
        self.topo.network_graph.remove_node("sw-cpu")
        switch_ecmp_groups = {sw_name: {} for sw_name in self.topo.get_p4switches().keys()}

        for sw_name, controllers in self.controllers.items():
            controllers.table_clear("ecmp_group_to_nhop")
            controllers.table_clear("ipv4_lpm")
            for sw_dst in self.topo.get_p4switches():

                # if its ourselves we create direct connections
                if sw_name == sw_dst:
                    for host in self.topo.get_hosts_connected_to(sw_name):
                        sw_port = self.topo.node_to_node_port_num(sw_name, host)
                        host_ip = self.topo.get_host_ip(host) + "/32"
                        host_mac = self.topo.get_host_mac(host)

                        # add rule
                        print "table_add at {}:".format(sw_name)
                        # log.write("[1] table_add ipv4_lpm set_nhop at {} to host {} using port {}\n".format(sw_name, host, sw_port))
                        self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(host_mac), str(sw_port)])

                # check if there are directly connected hosts
                else:
                    if self.topo.get_hosts_connected_to(sw_dst):
                        paths = self.topo.get_shortest_paths_between_nodes(sw_name, sw_dst)
                        for host in self.topo.get_hosts_connected_to(sw_dst):

                            if len(paths) == 1:
                                next_hop = paths[0][1]
                                host_ip = self.topo.get_host_ip(host) + "/24"
                                sw_port = self.topo.node_to_node_port_num(sw_name, next_hop)
                                dst_sw_mac = self.topo.node_to_node_mac(next_hop, sw_name)

                                # add rule
                                print "table_add at {}:".format(sw_name)
                                # log.write("[2] table_add ipv4_lpm set_nhop at {} to host {} using port {} to nexthop {}\n".format(sw_name, host, sw_port, next_hop))
                                self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(dst_sw_mac), str(sw_port)])

                            elif len(paths) > 1:
                                next_hops = [x[1] for x in paths]
                                dst_macs_ports = [(self.topo.node_to_node_mac(next_hop, sw_name),
                                                   self.topo.node_to_node_port_num(sw_name, next_hop))
                                                  for next_hop in next_hops]
                                host_ip = self.topo.get_host_ip(host) + "/24"

                                # check if the ecmp group already exists. The ecmp group is defined by the number of next
                                # ports used, thus we can use dst_macs_ports as key
                                if switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None):
                                    ecmp_group_id = switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None)
                                    print "table_add at {}:".format(sw_name)
                                    # log.write("[3] table_add ipv4_lpm ecmp_group at {} to switch {} to paths {}\n".format(sw_name, sw_dst, paths))
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(ecmp_group_id), str(len(dst_macs_ports))])

                                # new ecmp group for this switch
                                else:
                                    new_ecmp_group_id = len(switch_ecmp_groups[sw_name]) + 1
                                    switch_ecmp_groups[sw_name][tuple(dst_macs_ports)] = new_ecmp_group_id

                                    # add group
                                    for i, (mac, port) in enumerate(dst_macs_ports):
                                        print "table_add at {}:".format(sw_name)
                                        # log.write("[4] table_add ecmp_group_to_nhop at {} to switch {} using port {}\n".format(sw_name, sw_dst, port))
                                        self.controllers[sw_name].table_add("ecmp_group_to_nhop", "set_nhop", [str(new_ecmp_group_id), str(i)], [str(mac), str(port)])

                                    # add forwarding rule
                                    print "table_add at {}:".format(sw_name)
                                    # log.write("[5] table_add ipv4_lpm ecmp_group at {} to switch {} to paths {}\n".format(sw_name, sw_dst, paths))
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(new_ecmp_group_id), str(len(dst_macs_ports))])
class NCacheController(object):

    def __init__(self, sw_name, vtables_num=8):
        self.topo = Topology(db="../p4/topology.db")
        self.sw_name = sw_name
        self.thrift_port = self.topo.get_thrift_port(self.sw_name)
        self.cpu_port = self.topo.get_cpu_port_index(self.sw_name)
        self.controller = SimpleSwitchAPI(self.thrift_port)

        self.custom_calcs = self.controller.get_custom_crc_calcs()
        self.sketch_register_num = len(self.custom_calcs)

        self.vtables = []
        self.vtables_num = vtables_num

        # create a pool of ids (as many as the total amount of keys); this pool will
        # be used to assign an index to each key which will be used to index the
        # cached key counter and the validity register
        self.ids_pool = range(0, VTABLE_ENTRIES * VTABLE_SLOT_SIZE)

        # array of bitmaps, which marks available slots per cache line as 0 bits
        # and occupied slots as 1 bits
        self.mem_pool = [0] * VTABLE_ENTRIES

        # number of memory slots used (useful for the lfu eviction policy)
        self.used_mem_slots = 0

        # dictionary storing the value table index, bitmap and counter/validity
        # register index in the P4 switch that corresponds to each key
        self.key_map = {}

        self.setup()
        #self.out_of_band_test()

    def inform_server(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(UNIX_CHANNEL)
        except socket.error as msg:
            #print('Error: Unable to contact server for cache operation completion')
            return
        sock.sendall(CACHE_INSERT_COMPLETE)

    # reports the value of counters for each cached key
    # (used only for debugging purposes)
    def report_counters(self):
        for key, val in self.key_map.items():
            vt_idx, bitmap, key_idx = val
            res = self.controller.counter_read(CACHED_KEYS_COUNTER, key_idx)
            if res != 0:
                print("[COUNTER] key = " + key + " [ " + str(res.packets) + " ]")

    # periodically reset registers pertaining to the query statistics module of the
    # P4 switch (count-min sketch registers, bloom filters and counters)
    def periodic_registers_reset(self):
        t = threading.Timer(STATISTICS_REFRESH_INTERVAL, self.periodic_registers_reset)
        t.daemon = True
        t.start()

        # before resetting registers check if the cache is utilized above a
        # threshold (e.g 80%) and evict keys using the lfu policy if needed
        self.cache_lfu_eviction(threshold=0.8, sampling=0.2, to_remove=0.5)

        # reset bloom filter related registers
        for i in range(BLOOMF_REGISTERS_NUM):
            self.controller.register_reset(BLOOMF_REG_PREFIX + str(i + 1))

        # reset count min sketch related registers
        for i in range(SKETCH_REGISTERS_NUM):
            self.controller.register_reset(SKETCH_REG_PREFIX + str(i + 1))

        # reset counter register storing the query frequency of each cached item
        self.controller.counter_reset(CACHED_KEYS_COUNTER)

        print("[INFO]: Reset query statistics registers.")

    # the controller periodically checks if the memory used has exceeded a given threshold
    # (e.g 80 %) and if that is the case then it evicts keys according to an approximated
    # LFU policy inspired by REDIS (https://redis.io/topics/lru-cache)
    def cache_lfu_eviction(self, threshold=0.8, sampling=0.2, to_remove=0.5):
        # if the threshold has not been surpassed then there is nothing to do
        if self.used_mem_slots <= (threshold * len(self.mem_pool) * VTABLE_SLOT_SIZE):
            return

        n_samples = int(sampling * len(self.key_map.items()))
        samples = random.sample(self.key_map.items(), n_samples)

        # read the counter for each sample and store them in an array
        evict_list = []
        for key, val in samples:
            x, y, cnt_idx = self.key_map[key]
            counter = self.controller.counter_read(CACHED_KEYS_COUNTER, cnt_idx).packets
            evict_list.append((key, counter))

        # sort the array, pick the smallest K counters and evict their keys
        # (this could be achieved more optimally by using quickselect)
        import operator
        evict_list.sort(key=operator.itemgetter(1))

        for i in range(int(to_remove * n_samples)):
            curr = evict_list[i]
            self.evict(curr[0])

    def setup(self):
        if self.cpu_port:
            self.controller.mirroring_add(CONTROLLER_MIRROR_SESSION, self.cpu_port)

        # create custom hash functions for count min sketch and bloom filters
        self.set_crc_custom_hashes()
        self.create_hashes()

        # set a daemon to periodically reset registers
        self.periodic_registers_reset()

        # spawn new thread to serve incoming udp connections
        # (i.e hot reports from the switch)
        #udp_t = threading.Thread(target=self.hot_reports_loop)
        #udp_t.start()

    def set_crc_custom_hashes(self):
        i = 0
        for custom_crc32, width in sorted(self.custom_calcs.items()):
            self.controller.set_crc32_parameters(custom_crc32, crc32_polinomials[i],
                                                 0xffffffff, 0xffffffff, True, True)
            i += 1

    def create_hashes(self):
        self.hashes = []
        for i in range(self.sketch_register_num):
            self.hashes.append(Crc(32, crc32_polinomials[i], True, 0xffffffff, True, 0xffffffff))

    # set a static allocation scheme for l2 forwarding where the mac address of
    # each host is associated with the port connecting this host to the switch
    def set_forwarding_table(self):
        for host in self.topo.get_hosts_connected_to(self.sw_name):
            port = self.topo.node_to_node_port_num(self.sw_name, host)
            host_mac = self.topo.get_host_mac(host)
            self.controller.table_add("l2_forward", "set_egress_port", [str(host_mac)], [str(port)])

    def set_value_tables(self):
        for i in range(self.vtables_num):
            self.controller.table_add("vtable_" + str(i), "process_array_" + str(i), ['1'], [])

    # this function manages the mapping between slots in register arrays and the
    # cached items by implementing the First Fit algorithm described in the
    # Memory Management section of 4.4.2 (netcache paper)
    def first_fit(self, key, value_size):
        # number of slots needed to store the value (ceiling division)
        n_slots = (value_size + VTABLE_SLOT_SIZE - 1) / VTABLE_SLOT_SIZE
        if value_size <= 0:
            return None
        if key in self.key_map:
            return None

        for idx in range(len(self.mem_pool)):
            old_bitmap = self.mem_pool[idx]
            n_zeros = 8 - bin(old_bitmap).count("1")
            if n_zeros >= n_slots:
                cnt = 0
                bitmap = 0
                for i in reversed(range(8)):
                    if cnt >= n_slots:
                        break
                    if not self.bit_is_set(old_bitmap, i):
                        bitmap = bitmap | (1 << i)
                        cnt += 1

                # mark the chosen n_slots 0 bits as 1 bits because we assigned
                # them to the new key and they are now allocated
                self.mem_pool[idx] = old_bitmap | bitmap
                self.used_mem_slots += bin(bitmap).count("1")

                return (idx, bitmap)

        return None

    # converts a list of 1s and 0s represented as strings to a bitmap using bitwise
    # operations (this intermediate representation of a list of 1s and 0s is used to
    # avoid low level bitwise logic inside the core implementation logic)
    def convert_to_bitmap(self, strlist, bitmap_len):
        bitmap = 0
        # supports only bitmaps whose size is a multiple of 8 bits
        if bitmap_len % 8 != 0:
            return bitmap
        for i in strlist:
            bitmap = bitmap << 1
            bitmap = bitmap | int(i)
        return bitmap

    # this function checks whether the k-th bit of a given number is set
    def bit_is_set(self, n, k):
        if n & (1 << k):
            return True
        else:
            return False

    # given a key and its associated value, we update the lookup table on the switch
    # and we also update the value registers with the value given as argument
    # (stored in multiple slots)
    def insert(self, key, value, cont=True):
        # find where to put the value for the given key
        mem_info = self.first_fit(key, len(value))
        # if the key already exists or no space is available then stop
        if mem_info == None:
            return

        vt_index, bitmap = mem_info

        # keep track of the number of bytes of the value written so far
        cnt = 0

        # store the value of the key in the vtables of the switch while incrementally
        # storing a part of the value at each value table if the corresponding bit of
        # the bitmap is set
        for i in range(self.vtables_num):
            if self.bit_is_set(bitmap, self.vtables_num - i - 1):
                partial_val = value[cnt:cnt + VTABLE_SLOT_SIZE]
                self.controller.register_write(VTABLE_NAME_PREFIX + str(i), vt_index,
                                               self.str_to_int(partial_val))
                cnt += VTABLE_SLOT_SIZE

        # allocate an id from the pool to index the counter and validity register
        # (we take the last element of the list because python lists are optimized
        # for inserting and removing elements at the end of the list)
        key_index = self.ids_pool.pop()

        # add the new key to the cache lookup table of the p4 switch
        self.controller.table_add(NETCACHE_LOOKUP_TABLE, "set_lookup_metadata",
                                  [str(self.str_to_int(key))],
                                  [str(bitmap), str(vt_index), str(key_index)])

        # mark the cache entry for this key as valid
        self.controller.register_write("cache_status", key_index, 1)

        self.key_map[key] = vt_index, bitmap, key_index

        # inform the server about the successful cache insertion
        if cont:
            self.inform_server()

        print("Inserted key-value pair to cache: (" + key + "," + value + ")")

    # converts a string to a bytes representation and afterwards returns its integer
    # representation of width specified by the argument int_width
    # (seems hacky due to the restriction to use python2.7)
    def str_to_int(self, x, int_width=VTABLE_SLOT_SIZE):
        if len(x) > int_width:
            print "Error: Overflow while converting string to int"

        # add padding with 0x00 if the input string size is less than int_width
        bytearr = bytearray(int_width - len(x))
        bytearr.extend(x.encode('utf-8'))
        return struct.unpack(">Q", bytearr)[0]

    # given an arbitrary sized integer and the max width (in bits) of the integer,
    # it returns the string representation of the number (also stripping it of any
    # '0x00' characters) (network byte order is assumed)
    def int_to_packed(self, int_val, max_width=128, word_size=32):
        num_words = max_width / word_size
        words = self.int_to_words(int_val, num_words, word_size)

        fmt = '>%dI' % (num_words)
        return struct.pack(fmt, *words).strip('\x00')

    # split up an arbitrary sized integer into words (needed to hack around the
    # struct.pack limitation of converting to bytes any integer greater than 8 bytes)
    def int_to_words(self, int_val, num_words, word_size):
        max_int = 2 ** (word_size * num_words) - 1
        max_word_size = 2 ** word_size - 1
        words = []
        for _ in range(num_words):
            word = int_val & max_word_size
            words.append(int(word))
            int_val >>= word_size
        words.reverse()
        return words

    # update the value of the given key with the new value given as argument
    # (by allowing updates to also be done by the controller, the client is able
    # to update keys with values bigger than the previous one; in the netcache
    # paper this restriction is not resolved)
    def update(self, key, value):
        # if the key is not in the cache then there is nothing to do
        if key not in self.key_map:
            return
        # update the key-value pair by removing the old pair and inserting the new one
        self.evict(key)
        self.insert(key, value)

    # evict the given key from the cache by deleting its associated entries in the
    # action tables of the switch, deallocating its memory space and marking the
    # cache entry as valid once the deletion is completed
    def evict(self, key):
        if key not in self.key_map:
            return

        # delete the entry from the lookup table
        entry_handle = self.controller.get_handle_from_match(
            NETCACHE_LOOKUP_TABLE, [str(self.str_to_int(key)), ])
        if entry_handle is not None:
            self.controller.table_delete(NETCACHE_LOOKUP_TABLE, entry_handle)

        # delete the mapping of the key from the controller's dictionary
        vt_idx, bitmap, key_idx = self.key_map[key]
        del self.key_map[key]

        # deallocate space from the memory pool
        self.mem_pool[vt_idx] = self.mem_pool[vt_idx] ^ bitmap
        self.used_mem_slots = self.used_mem_slots - bin(bitmap).count("1")

        # free the id used to index the validity/counter register and append it
        # back to the id pool of the controller
        self.ids_pool.append(key_idx)

        # mark the cache entry as valid again (should be the last thing to do)
        self.controller.register_write("cache_status", key_idx, 1)

    # used for testing purposes and static population of the cache
    def dummy_populate_vtables(self):
        test_values_l = ["alpha", "beta", "gamma", "delta", "epsilon", "zeta", "hita",
                         "theta", "yiota", "kappa", "lambda", "meta"]
        test_keys_l = ["one", "two", "three", "four", "five", "six", "seven", "eight",
                       "nine", "ten", "eleven", "twelve"]
        cnt = 0
        for i in range(11):
            self.insert(test_keys_l[i], test_values_l[i], False)

    # handles reports from the switch corresponding to hot keys, updates to key-value
    # pairs or deletions - this function receives a packet, extracts its netcache
    # header and manipulates the cache based on the operation field of the netcache
    # header (callback function)
    def recv_switch_updates(self, pkt):
        print("Received message from switch")

        # extract netcache header information
        if pkt.haslayer(UDP):
            ncache_header = NetcacheHeader(pkt[UDP].payload)
        elif pkt.haslayer(TCP):
            ncache_header = NetcacheHeader(pkt[TCP].payload)

        key = self.int_to_packed(ncache_header.key, max_width=128)
        value = self.int_to_packed(ncache_header.value, max_width=1024)
        op = ncache_header.op

        if op == NETCACHE_HOT_READ_QUERY:
            print("Received hot report for key = " + key)
            # if the netcache header has a null value or if the "hot key" reported
            # doesn't exist then do not update the cache
            if ncache_header.op == NETCACHE_KEY_NOT_FOUND:
                return
            self.insert(key, value)

        elif op == NETCACHE_DELETE_COMPLETE:
            print("Received query to delete key = " + key)
            self.evict(key)

        elif op == NETCACHE_UPDATE_COMPLETE:
            print("Received query to update key = " + key)
            self.update(key, value)

        else:
            print("Error: unrecognized operation field of netcache header")

    # sniff the interface connected to the P4 switch indefinitely and, when a valid
    # netcache packet is captured, handle it via a callback to recv_switch_updates
    def hot_reports_loop(self):
        cpu_port_intf = str(self.topo.get_cpu_port_intf(self.sw_name))
        sniff(iface=cpu_port_intf, prn=self.recv_switch_updates, filter="port 50000")

    def main(self):
        self.set_forwarding_table()
        self.set_value_tables()
        self.dummy_populate_vtables()
        self.hot_reports_loop()
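The 8-byte slot packing above is the trickiest part of the insert path. The following standalone sketch repeats the same conversion, assuming VTABLE_SLOT_SIZE is 8 bytes (an assumption, since the constant is defined outside this listing):

# Standalone sketch of the value-slot packing (illustrative, Python 2.7)
import struct

VTABLE_SLOT_SIZE = 8  # assumed slot width in bytes

def str_to_int(x, int_width=VTABLE_SLOT_SIZE):
    # left-pad with zero bytes so the result always occupies int_width bytes,
    # then read those bytes as one big-endian 64-bit unsigned integer
    bytearr = bytearray(int_width - len(x))
    bytearr.extend(x.encode('utf-8'))
    return struct.unpack(">Q", bytes(bytearr))[0]

print str_to_int("one")  # 7302757 == 0x6f6e65, i.e. the bytes of "one"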
class Controller(object):

    def __init__(self):
        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.init()

    def init(self):
        self.connect_to_switches()
        self.reset_states()
        self.set_table_defaults()

    def reset_states(self):
        [controller.reset_state() for controller in self.controllers.values()]

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def set_table_defaults(self):
        for controller in self.controllers.values():
            controller.table_set_default("ipv4_lpm", "drop", [])

    def install_rules(self):
        for sw_name, controller in self.controllers.items():
            for sw_dst in self.topo.get_p4switches():

                # if its ourselves we create direct connections
                if sw_name == sw_dst:
                    for host in self.topo.get_hosts_connected_to(sw_name):
                        sw_port = self.topo.node_to_node_port_num(sw_name, host)
                        host_ip = self.topo.get_host_ip(host) + "/32"
                        host_mac = self.topo.get_host_mac(host)

                        # add rule
                        print "table_add at {}:".format(sw_name)
                        self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(host_mac), str(sw_port)])

                # check if there are directly connected hosts
                else:
                    if self.topo.get_hosts_connected_to(sw_dst):
                        paths = self.topo.get_shortest_paths_between_nodes(sw_name, sw_dst)
                        for host in self.topo.get_hosts_connected_to(sw_dst):
                            next_hop = paths[0][1]
                            host_ip = self.topo.get_host_ip(host) + "/24"
                            sw_port = self.topo.node_to_node_port_num(sw_name, next_hop)
                            dst_sw_mac = self.topo.node_to_node_mac(next_hop, sw_name)

                            # add rule
                            print "table_add at {}:".format(sw_name)
                            self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(dst_sw_mac), str(sw_port)])

    def main(self):
        self.install_rules()
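Assumed entry point for the simple shortest-path controller above (not part of the original listing):

if __name__ == "__main__":
    Controller().main()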
class RSVPController(object):

    def __init__(self):
        """Initializes the topology and data structures"""
        if not os.path.exists("topology.db"):
            print("Could not find topology object!!!\n")
            raise Exception
        self.topo = Topology(db="topology.db")
        self.controllers = {}
        self.init()

        # sorted by timeouts
        self.current_reservations = {}
        # initial link capacity
        self.links_capacity = self.build_links_capacity()

        self.update_lock = threading.Lock()
        self.timeout_thread = threading.Thread(target=self.reservations_timeout_thread, args=(1, ))
        self.timeout_thread.daemon = True
        self.timeout_thread.start()

    def init(self):
        """Connects to switches and resets."""
        self.connect_to_switches()
        self.reset_states()

    def reset_states(self):
        """Resets registers, tables, etc."""
        [controller.reset_state() for controller in self.controllers.values()]

    def connect_to_switches(self):
        """Connects to all the switches in the topology and saves them in
        self.controllers.
        """
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def build_links_capacity(self):
        """Builds the link capacities dictionary.

        Returns:
            dict: {edge: bw}
        """
        links_capacity = {}
        # Iterates all the edges in the topology formed by switches
        for src, dst in self.topo.network_graph.keep_only_p4switches().edges:
            bw = self.topo.network_graph.edges[(src, dst)]['bw']
            # add both directions
            links_capacity[(src, dst)] = bw
            links_capacity[(dst, src)] = bw
        return links_capacity

    def reservations_timeout_thread(self, refresh_rate=1):
        """Every refresh_rate seconds checks all the reservations; if any has
        timed out, tries to delete it.

        Args:
            refresh_rate (int, optional): Refresh rate. Defaults to 1.
        """
        while True:
            # sleeps
            time.sleep(refresh_rate)

            # locks the self.current_reservations data structure. This is done
            # because the CLI can also access the reservations.
            with self.update_lock:
                to_remove = []
                # iterates all the reservations and updates their timeouts;
                # if a timeout is reached we delete the reservation
                for reservation, data in self.current_reservations.items():
                    data['timeout'] -= refresh_rate
                    # has it expired?
                    if data['timeout'] <= 0:
                        to_remove.append(reservation)

                # removes all the reservations that expired
                for reservation in to_remove:
                    self.del_reservation(*reservation)

    def set_mpls_tbl_labels(self):
        """We set all the table defaults to reach all the hosts/networks in the network"""
        # for all switches
        for sw_name, controller in self.controllers.items():

            # get all direct hosts and add a direct entry
            for host in self.topo.get_hosts_connected_to(sw_name):
                sw_port = self.topo.node_to_node_port_num(sw_name, host)
                host_ip = self.topo.get_host_ip(host)
                host_mac = self.topo.get_host_mac(host)

                # adds direct forwarding rule
                controller.table_add("FEC_tbl", "ipv4_forward", ["0.0.0.0/0", str(host_ip)],
                                     [str(host_mac), str(sw_port)])

            for switch in self.topo.get_switches_connected_to(sw_name):
                sw_port = self.topo.node_to_node_port_num(sw_name, switch)
                # reverse port mac
                other_switch_mac = self.topo.node_to_node_mac(switch, sw_name)

                # we add a normal rule and a penultimate one
                controller.table_add("mpls_tbl", "mpls_forward", [str(sw_port), '0'],
                                     [str(other_switch_mac), str(sw_port)])
                controller.table_add("mpls_tbl", "penultimate", [str(sw_port), '1'],
                                     [str(other_switch_mac), str(sw_port)])

    def build_mpls_path(self, switches_path):
        """Using a path of switches builds the mpls path. In our simplification,
        labels are port indexes.

        Args:
            switches_path (list): path of switches to allocate

        Returns:
            list: label path
        """
        # label path
        label_path = []
        # iterate over all pairs of switches in the path
        for current_node, next_node in zip(switches_path, switches_path[1:]):
            # we get the sw1->sw2 port number from the topo object
            label = self.topo.node_to_node_port_num(current_node, next_node)
            label_path.append(label)
        return label_path

    def get_sorted_paths(self, src, dst):
        """Gets all paths between src and dst sorted by length.
        This function uses the internal networkx API.

        Args:
            src (str): src name
            dst (str): dst name

        Returns:
            list: paths between src and dst
        """
        paths = self.topo.get_all_paths_between_nodes(src, dst)
        # trim src and dst
        paths = [x[1:-1] for x in paths]
        return paths

    def get_shortest_path(self, src, dst):
        """Computes the shortest path. Simple function used to test the system
        by always allocating the shortest path.

        Args:
            src (str): src name
            dst (str): dst name

        Returns:
            list: shortest path between src, dst
        """
        return self.get_sorted_paths(src, dst)[0]

    def check_if_reservation_fits(self, path, bw):
        """Checks if the candidate reservation fits in the current state of the
        network. Using the path of switches, checks if all the edges (links) have
        enough spare capacity; otherwise, returns False.

        Args:
            path (list): list of switches
            bw (float): requested bandwidth in mbps

        Returns:
            bool: true if the allocation can be performed on path
        """
        # iterates over all pairs of switches (edges)
        for link in zip(path, path[1:]):
            # checks if there is enough capacity
            if (self.links_capacity[link] - bw) < 0:
                return False
        return True

    def add_link_capacity(self, path, bw):
        """Adds bw capacity to all the edges along path. This function is used
        when an allocation is removed.

        Args:
            path (list): list of switches
            bw (float): requested bandwidth in mbps
        """
        # iterates over all pairs of switches (edges)
        for link in zip(path, path[1:]):
            # adds capacity
            self.links_capacity[link] += bw

    def sub_link_capacity(self, path, bw):
        """Subtracts bw capacity from all the edges along path. This function is
        used when an allocation is added.

        Args:
            path (list): list of switches
            bw (float): requested bandwidth in mbps
        """
        # iterates over all pairs of switches (edges)
        for link in zip(path, path[1:]):
            # subtracts capacity
            self.links_capacity[link] -= bw

    def get_available_path(self, src, dst, bw):
        """Checks all paths from src to dst and picks the shortest path that can
        allocate bw.

        Args:
            src (str): src name
            dst (str): dst name
            bw (float): requested bandwidth in mbps

        Returns:
            list/bool: best path / False if none
        """
        # get all paths sorted from shortest to longest
        paths = self.get_sorted_paths(src, dst)
        for path in paths:
            # checks if the path has capacity
            if self.check_if_reservation_fits(path, bw):
                return path
        return False

    def get_meter_rates_from_bw(self, bw, burst_size=700000):
        """Returns the CIR and PIR rates and bursts to configure meters at bw.

        Args:
            bw (float): desired bandwidth in mbps
            burst_size (int, optional): Max capacity of the meter buckets. Defaults to 700000.

        Returns:
            list: [(rate1, burst1), (rate2, burst2)]
        """
        rates = []
        rates.append((0.125 * bw, burst_size))
        rates.append((0.125 * bw, burst_size))
        return rates

    def set_direct_meter_bandwidth(self, sw_name, meter_name, handle, bw):
        """Sets a meter entry (using a table handle) to color packets using bw mbps.

        Args:
            sw_name (str): switch name
            meter_name (str): meter name
            handle (int): entry handle
            bw (float): desired bandwidth to rate limit
        """
        rates = self.get_meter_rates_from_bw(bw)
        self.controllers[sw_name].meter_set_rates(meter_name, handle, rates)

    def _add_reservation(self, src, dst, duration, bandwidth, priority, path, update):
        """Adds or updates a single reservation.

        Args:
            src (str): src name
            dst (str): dst name
            duration (float): reservation timeout
            bandwidth (float): requested bandwidth in mbps
            priority (int): reservation priority
            path (list): switch path where to allocate the reservation
            update (bool): update flag
        """
        # We build the label path. For that we use self.build_mpls_path and reverse
        # the returned labels, since our rsvp.p4 will push them in reverse order.
        label_path = [str(x) for x in self.build_mpls_path(path)[::-1]]

        # Get the required info to add a table rule.
        # get the ingress switch as the first node in the path
        src_gw = path[0]
        # compute the action name using the length of the label path
        action = "mpls_ingress_{}_hop".format(len(label_path))
        # src lpm address
        src_ip = str(self.topo.get_host_ip(src) + "/32")
        # dst exact address
        dst_ip = str(self.topo.get_host_ip(dst))
        # match list
        match = [src_ip, dst_ip]

        # if we have a label path
        if len(label_path) != 0:
            # If the entry is new we simply add it
            if not update:
                entry_handle = self.controllers[src_gw].table_add("FEC_tbl", action, match, label_path)
                self.set_direct_meter_bandwidth(src_gw, "rsvp_meter", entry_handle, bandwidth)
            # if the entry is being updated we modify it using its handle
            else:
                entry = self.current_reservations.get((src, dst), None)
                entry_handle = self.controllers[src_gw].table_modify("FEC_tbl", action,
                                                                     entry['handle'], label_path)
                self.set_direct_meter_bandwidth(src_gw, "rsvp_meter", entry_handle, bandwidth)

            # updates the controller's link and reservation structures if the rules
            # were added successfully
            if entry_handle:
                self.sub_link_capacity(path, bandwidth)
                self.current_reservations[(src, dst)] = {
                    "timeout": (duration),
                    "bw": (bandwidth),
                    "priority": (priority),
                    'handle': entry_handle,
                    'path': path
                }
                print("Successful reservation({}->{}): path: {}".format(src, dst, "->".join(path)))
            else:
                print("\033[91mFailed reservation({}->{}): path: {}\033[0m".format(src, dst, "->".join(path)))
        else:
            print("Warning: Hosts are connected to the same switch!")

    def add_reservation(self, src, dst, duration, bandwidth, priority):
        """Adds a new reservation taking into account the priority. This addition
        can potentially move or delete other allocations.

        Args:
            src (str): src name
            dst (str): dst name
            duration (float): reservation timeout
            bandwidth (float): requested bandwidth in mbps
            priority (int): reservation priority
        """
        # locks the self.current_reservations data structure. This is done because
        # there is a thread that could access it concurrently.
        with self.update_lock:

            # if the reservation exists, we allocate it again by just updating the entry.
            # For that we set the UPDATE_ENTRY flag and restore its link capacity such
            # that the new re-allocation, with a possibly new bw/priority, can be done
            # taking the new capacities into account.
            UPDATE_ENTRY = False
            if self.current_reservations.get((src, dst), None):
                data = self.current_reservations[(src, dst)]
                path = data['path']
                bw = data['bw']
                # updates link capacities
                self.add_link_capacity(path, bw)
                UPDATE_ENTRY = True

            # finds the best path (if one exists) to allocate the requested reservation
            path = self.get_available_path(src, dst, bandwidth)
            if path:
                # add or update the reservation
                self._add_reservation(src, dst, duration, bandwidth, priority, path, UPDATE_ENTRY)

            # Can't be allocated! However, it might be possible to re-allocate things
            else:
                # check if the flow could be placed by removing lower priorities
                previous_links_capacities = self.links_capacity.copy()
                for reservation, data in self.current_reservations.items():
                    # make sure we do not remove ourselves
                    # again in case this is a modification
                    if reservation == (src, dst):
                        continue
                    if data['priority'] < priority:
                        self.add_link_capacity(data['path'], data['bw'])

                # check if it fits in a network without the lower priority flows
                path = self.get_available_path(src, dst, bandwidth)

                # we rebalance the lower priority reservations if possible
                if path:
                    # adds the main new allocation
                    self._add_reservation(src, dst, duration, bandwidth, priority, path, UPDATE_ENTRY)

                    # re-allocate everything if possible
                    for reservation, data in sorted(self.current_reservations.items(),
                                                    key=lambda x: x[1]['priority'], reverse=True):
                        if data['priority'] < priority:
                            src, dst = reservation[0], reservation[1]
                            path = self.get_available_path(src, dst, data['bw'])
                            if path:
                                # add or update the reservation
                                self._add_reservation(src, dst, data['timeout'], data['bw'],
                                                      data['priority'], path, True)
                            else:
                                # delete it
                                data = self.current_reservations[(src, dst)]
                                path = data['path']
                                bw = data['bw']
                                self.sub_link_capacity(path, bw)
                                print("\033[91mDeleting allocation {}->{} due to a higher priority allocation!\033[0m".format(src, dst))
                                self.del_reservation(src, dst)
                else:
                    # restore capacities
                    self.links_capacity = previous_links_capacities

                    # if we failed and it was an entry to be updated we remove it
                    if UPDATE_ENTRY:
                        data = self.current_reservations[(src, dst)]
                        path = data['path']
                        bw = data['bw']
                        self.sub_link_capacity(path, bw)
                        print("Deleting new allocation. Does not fit anymore!")
                        self.del_reservation(src, dst)

                    print("\033[91mRESERVATION FAILURE: no bandwidth available!\033[0m")

    def del_reservation(self, src, dst):
        """Deletes a reservation between src and dst, if it exists. To delete the
        reservation, the self.current_reservations data structure is used to
        retrieve all the needed information. After deleting the reservation from
        the ingress switch, path capacities are updated.

        Args:
            src (str): src name
            dst (str): dst name
        """
        # checks if there is an allocation between src->dst
        entry = self.current_reservations.get((src, dst), None)
        if entry:
            # gets the handle to delete the entry
            entry_handle = entry['handle']
            # gets the src ingress switch
            sw_gw = self.topo.get_host_gateway_name(src)
            # removes the table entry using the handle
            self.controllers[sw_gw].table_delete("FEC_tbl", entry_handle, True)
            # updates the link capacities
            self.add_link_capacity(entry['path'], entry['bw'])
            # removes the reservation from the controller's memory
            del self.current_reservations[(src, dst)]
            print("\nRSVP Deleted/Expired Reservation({}->{}): path: {}".format(
                src, dst, "->".join(entry['path'])))
        else:
            print("No entry for {} -> {}".format(src, dst))

    def del_all_reservations(self):
        """Deletes all the current reservations"""
        # locks the self.current_reservations data structure. This is done because
        # there is a thread that could access it concurrently.
        with self.update_lock:
            # makes a copy of all the reservation pairs
            reservation_keys = self.current_reservations.keys()
            for src, dst in reservation_keys:
                self.del_reservation(src, dst)
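The 0.125 factor in get_meter_rates_from_bw is a unit conversion: assuming a byte-based meter configured in bytes per microsecond, bw Mbit/s equals bw * 10^6 / 8 bytes per second, which is 0.125 * bw bytes per microsecond. A tiny standalone check of that arithmetic (illustrative only, not part of the controller):

def get_meter_rates_from_bw(bw, burst_size=700000):
    # bw [Mbit/s] * 10**6 / 8 [bits per byte] / 10**6 [us per s] == 0.125 * bw [bytes/us]
    return [(0.125 * bw, burst_size), (0.125 * bw, burst_size)]

print get_meter_rates_from_bw(8)  # [(1.0, 700000), (1.0, 700000)] -> 1 byte/us == 8 Mbps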
class RoutingController(object):

    def __init__(self):
        self.topo = Topology(db="./topology.db")  # set the topology
        self.controllers = {}  # the switches
        self.custom_calcs = {}
        self.register_num = {}
        self.registers = {}
        self.init()

    def init(self):
        self.connect_to_switches()
        self.reset_states()
        self.set_table_defaults()
        self.set_custom_calcs()
        self.reset_all_registers()
        self.set_crc_custom_hashes()

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():  # topology line 632
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def reset_states(self):
        [controllers.reset_state() for controllers in self.controllers.values()]

    def set_table_defaults(self):
        for controllers in self.controllers.values():
            controllers.table_set_default("ipv4_lpm", "drop", [])
            controllers.table_set_default("ecmp_group_to_nhop", "drop", [])

    def set_custom_calcs(self):
        for p4switch in self.topo.get_p4switches():
            self.custom_calcs[p4switch] = self.controllers[p4switch].get_custom_crc_calcs()
            self.register_num[p4switch] = len(self.custom_calcs[p4switch])

    def reset_all_registers(self):
        for sw, controller in self.controllers.items():
            for register in controller.get_register_arrays():
                controller.register_reset(register)

    def set_crc_custom_hashes(self):
        for sw_name in self.controllers.keys():
            i = 0
            for custom_crc32, width in sorted(self.custom_calcs[sw_name].items()):
                self.controllers[sw_name].set_crc32_parameters(custom_crc32, crc32_polinomials[i],
                                                               0xffffffff, 0xffffffff, True, True)
                i += 1

    def route(self):
        switch_ecmp_groups = {sw_name: {} for sw_name in self.topo.get_p4switches().keys()}
        # self.topo.network_graph.remove_node("sw-cpu")

        for sw_name, controllers in self.controllers.items():
            for sw_dst in self.topo.get_p4switches():

                # if its ourselves we create direct connections
                if sw_name == sw_dst:
                    for host in self.topo.get_hosts_connected_to(sw_name):
                        sw_port = self.topo.node_to_node_port_num(sw_name, host)
                        host_ip = self.topo.get_host_ip(host) + "/32"
                        host_mac = self.topo.get_host_mac(host)

                        # add rule
                        print "table_add at {}:".format(sw_name)
                        self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(host_mac), str(sw_port)])

                # check if there are directly connected hosts
                else:
                    if self.topo.get_hosts_connected_to(sw_dst):
                        paths = self.topo.get_shortest_paths_between_nodes(sw_name, sw_dst)
                        for host in self.topo.get_hosts_connected_to(sw_dst):

                            if len(paths) == 1:
                                next_hop = paths[0][1]
                                host_ip = self.topo.get_host_ip(host) + "/24"
                                sw_port = self.topo.node_to_node_port_num(sw_name, next_hop)
                                dst_sw_mac = self.topo.node_to_node_mac(next_hop, sw_name)

                                # add rule
                                print "table_add at {}:".format(sw_name)
                                self.controllers[sw_name].table_add("ipv4_lpm", "set_nhop", [str(host_ip)], [str(dst_sw_mac), str(sw_port)])

                            elif len(paths) > 1:
                                next_hops = [x[1] for x in paths]
                                dst_macs_ports = [(self.topo.node_to_node_mac(next_hop, sw_name),
                                                   self.topo.node_to_node_port_num(sw_name, next_hop))
                                                  for next_hop in next_hops]
                                host_ip = self.topo.get_host_ip(host) + "/24"

                                # check if the ecmp group already exists. The ecmp group is defined by the number of next
                                # ports used, thus we can use dst_macs_ports as key
                                if switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None):
                                    ecmp_group_id = switch_ecmp_groups[sw_name].get(tuple(dst_macs_ports), None)
                                    print "table_add at {}:".format(sw_name)
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(ecmp_group_id), str(len(dst_macs_ports))])

                                # new ecmp group for this switch
                                else:
                                    new_ecmp_group_id = len(switch_ecmp_groups[sw_name]) + 1
                                    switch_ecmp_groups[sw_name][tuple(dst_macs_ports)] = new_ecmp_group_id

                                    # add group
                                    for i, (mac, port) in enumerate(dst_macs_ports):
                                        print "table_add at {}:".format(sw_name)
                                        self.controllers[sw_name].table_add("ecmp_group_to_nhop", "set_nhop", [str(new_ecmp_group_id), str(i)], [str(mac), str(port)])

                                    # add forwarding rule
                                    print "table_add at {}:".format(sw_name)
                                    self.controllers[sw_name].table_add("ipv4_lpm", "ecmp_group", [str(host_ip)], [str(new_ecmp_group_id), str(len(dst_macs_ports))])

    def main(self):
        self.route()
        # for switch_id, controller in enumerate(self.controllers.values()):
        #     controller.register_write("switch_id", 0, switch_id)
        #     controller.register_write("swap_control", 0, 0)
        #     controller.register_write("sketch_fg", 0, 0)
        #     controller.register_write("previous_ingress_timestamp", 0, 0)
        for switch_id, switch_name in enumerate(self.controllers.keys()):
            print "{} {}".format(switch_id, switch_name)
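Both NCacheController and the controller above reference a module-level crc32_polinomials list that is not part of these listings. A plausible definition using well-known 32-bit CRC polynomials is sketched below; the exact set used by the original projects is an assumption.

# Assumed polynomial pool for set_crc_custom_hashes(); these are standard CRC-32
# variants (IEEE, Castagnoli, Koopman, Q), not necessarily the original project's choice.
crc32_polinomials = [0x04C11DB7,   # CRC-32 (IEEE 802.3)
                     0x1EDC6F41,   # CRC-32C (Castagnoli)
                     0x741B8CD7,   # CRC-32K (Koopman)
                     0x814141AB]   # CRC-32Q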