# "Smart" instance: build flow entries for net_name from the AMPL optimization
# results and push per-switch flow-table statistics (min/avg/max) onto out_q.
def process_network_AMPL_model(net_name, out_q):
    filename_res = topology_dir + "/results.txt." + net_name
    filename_net = topology_dir + "/network.xml." + net_name
    (G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net)
    (requests, faults) = f_t_parser.parse_ampl_results(filename=filename_res)
    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'
    print 'Network has', len(switches), 'switches,', G.number_of_edges() - len(hosts), 'links and', len(hosts), 'hosts'
    mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
    ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)
    print "\n# Smart instance " + net_name + " with results from AMPL model..."
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
        requests, faults, ports_dict,
        match_flow=f_t_parser.get_mac_match_mininet,
        check_cache=False, filename=filename_res,
        confirm_cache_loading=False, dpctl_script=False)
    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node != 'global']
    # print 'min', min(tot_flows)
    # print 'avg', sum(tot_flows) / float(len(tot_flows))
    # print 'max', max(tot_flows)
    D = len(requests)
    F = len(faults)
    print 'O(D*F) = %d*%d = %d' % (D, F, D * F)
    stats = [net_name + " AMPL model",
             {'min': min(tot_flows),
              'avg': sum(tot_flows) / float(len(tot_flows)),
              'max': max(tot_flows)}]
    out_q.put(stats)
    with open("tmp/" + str(net_name) + " AMPL model.txt", "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats

# "Dumb" baseline: same statistics for net_name, but with primary/backup paths
# computed by execute_instance() using end-to-end path protection
# (node-disjoint backup paths) instead of the AMPL model.
def process_network_E2E_PP(net_name, out_q):
    filename_res = topology_dir + "/results.txt." + net_name
    filename_net = topology_dir + "/network.xml." + net_name
    (G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net)
    (requests, faults) = f_t_parser.parse_ampl_results(filename=filename_res)
    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'
    print 'Network has', len(switches), 'switches,', G.number_of_edges() - len(hosts), 'links and', len(hosts), 'hosts'
    mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
    ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)
    print "\n# Dumb instance " + net_name + " with end-to-end path protection (bp_node_disj=True...)"
    # we take requests just for its keys, but primary/backup paths are calculated by execute_instance()
    demands = {dem: 1 for dem in requests.keys()}
    N = G.number_of_edges() - len(hosts)
    G_dir = G.to_directed()
    for e in G_dir.edges():
        G_dir.edge[e[0]][e[1]] = {'capacity': N * N * 10}
    fc = execute_instance(G_dir, demands, bp_node_disj=True)
    (requests_E2E, faults_E2E) = create_requests_faults_dict(fc.pps, fc.bps)
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
        requests_E2E, faults_E2E, ports_dict,
        match_flow=f_t_parser.get_mac_match_mininet,
        check_cache=False, filename=filename_res + "E2E",
        confirm_cache_loading=False, dpctl_script=False)
    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node != 'global']
    # print 'min', min(tot_flows)
    # print 'avg', sum(tot_flows) / float(len(tot_flows))
    # print 'max', max(tot_flows)
    stats = [net_name + " E2E PP",
             {'min': min(tot_flows),
              'avg': sum(tot_flows) / float(len(tot_flows)),
              'max': max(tot_flows)}]
    out_q.put(stats)
    with open("tmp/" + str(net_name) + " E2E PP.txt", "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats

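# --- Usage sketch (not part of the original sources) ---------------------------
# Both process_network_* functions push their stats onto a queue, so they lend
# themselves to running one worker per topology in parallel. This is a minimal,
# hypothetical driver: the names in NET_NAMES are placeholders, and
# topology_dir/f_t_parser are assumed to be importable exactly as used above.
from multiprocessing import Process, Queue

NET_NAMES = ['net1', 'net2']  # placeholder topology names, not from the project

def run_all(net_names=NET_NAMES):
    out_q = Queue()
    workers = []
    for name in net_names:
        for target in (process_network_AMPL_model, process_network_E2E_PP):
            p = Process(target=target, args=(name, out_q))
            p.start()
            workers.append(p)
    # drain the queue before joining so workers never block on a full pipe
    stats = [out_q.get() for _ in workers]
    for p in workers:
        p.join()
    return stats
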
def __init__(self, *args, **kwargs):
    super(SPIDER, self).__init__(*args, **kwargs)
    results_hash = SPIDER_parser.md5sum_results()
    if SPIDER_parser.network_has_changed(results_hash):
        SPIDER_parser.erase_figs_folder()
    (self.requests, self.faults) = SPIDER_parser.parse_ampl_results_if_not_cached()
    print len(self.requests), 'requests loaded'
    print len(self.faults), 'faults loaded'
    print "Building network graph from network.xml..."
    # G is a NetworkX Graph object
    (self.G, self.pos, self.hosts, self.switches, self.mapping) = SPIDER_parser.parse_network_xml()
    print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges() - len(self.hosts), 'links and', len(self.hosts), 'hosts'
    print "NetworkX to Mininet topology conversion..."
    # mn_topo is a Mininet Topo object
    self.mn_topo = SPIDER_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
    # mn_net is a Mininet object
    self.mn_net = SPIDER_parser.create_mininet_net(self.mn_topo)
    SPIDER_parser.launch_mininet(self.mn_net)
    self.ports_dict = SPIDER_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)
    SPIDER_parser.mn_setup_MAC_and_IP(self.mn_net)
    SPIDER_parser.mn_setup_static_ARP_entries(self.mn_net)
    SPIDER_parser.draw_network_topology(self.G, self.pos, self.ports_dict, self.hosts)
    (self.fault_ID, self.flow_entries_dict,
     self.flow_entries_with_detection_timeouts_dict,
     self.flow_entries_with_flowlet_timeouts_dict) = SPIDER_parser.generate_flow_entries_dict(
        self.requests, self.faults, self.ports_dict,
        match_flow=SPIDER_parser.get_mac_match_mininet,
        check_cache=True, dpctl_script=True)
    #SPIDER_parser.print_flow_stats(SPIDER_parser.get_flow_stats_dict(self.flow_entries_dict))
    # Associates dp_id to datapath object
    self.dp_dictionary = dict()
    # Associates dp_id to a dict associating port<->MAC address
    self.ports_mac_dict = dict()
    # Needed by SPIDER_ctrl_REST
    self.SPIDER_parser = SPIDER_parser

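# --- Datapath registration sketch (illustrative, not the project's handler) ----
# dp_dictionary and ports_mac_dict above start empty and are filled when
# switches connect. The project's actual handler is not shown here; the
# following is a hypothetical Ryu handler, using the standard Ryu event API,
# that illustrates the kind of code expected to populate them.
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, set_ev_cls

@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
    datapath = ev.msg.datapath
    # remember the datapath object so flow entries can be pushed to it later
    self.dp_dictionary[datapath.id] = datapath
    # reserve a per-switch dict for the port<->MAC mapping
    self.ports_mac_dict.setdefault(datapath.id, {})
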
def __init__(self, *args, **kwargs):
    super(OpenStateFaultTolerance, self).__init__(*args, **kwargs)
    delta_6 = float(os.environ['delta_6'])
    delta_7 = float(os.environ['delta_7'])
    delta_5 = float(os.environ['delta_5'])
    f_t_parser.detection_timeouts_list = [(delta_6, delta_7, delta_5)]
    self.REALIZATIONS_NUM = int(os.environ['REALIZATIONS_NUM'])
    results_hash = f_t_parser.md5sum_results()
    if f_t_parser.network_has_changed(results_hash):
        f_t_parser.erase_figs_folder()
    (self.requests, self.faults) = f_t_parser.parse_ampl_results_if_not_cached()
    print len(self.requests), 'requests loaded'
    print len(self.faults), 'faults loaded'
    print "Building network graph from network.xml..."
    # G is a NetworkX Graph object
    (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml()
    print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges() - len(self.hosts), 'links and', len(self.hosts), 'hosts'
    print "NetworkX to Mininet topology conversion..."
    # mn_topo is a Mininet Topo object
    self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
    # mn_net is a Mininet object
    self.mn_net = f_t_parser.create_mininet_net(self.mn_topo)
    f_t_parser.launch_mininet(self.mn_net)
    self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)
    f_t_parser.mn_setup_MAC_and_IP(self.mn_net)
    f_t_parser.mn_setup_static_ARP_entries(self.mn_net)
    f_t_parser.draw_network_topology(self.G, self.pos, self.ports_dict, self.hosts)
    (self.fault_ID, self.flow_entries_dict,
     self.flow_entries_with_timeout_dict,
     self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
        self.requests, self.faults, self.ports_dict,
        match_flow=f_t_parser.get_mac_match_mininet,
        check_cache=False)
    # Associates dp_id to datapath object
    self.dp_dictionary = dict()
    # Associates dp_id to a dict associating port<->MAC address
    self.ports_mac_dict = dict()
    # Needed by fault_tolerance_rest
    self.f_t_parser = f_t_parser
    # switch counter
    self.switch_count = 0

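# --- Launch sketch (assumptions flagged inline) ---------------------------------
# The constructor above raises KeyError unless delta_5, delta_6, delta_7 and
# REALIZATIONS_NUM are present in the environment, so they must be exported
# before ryu-manager starts. This is a minimal, hypothetical launcher: the
# numeric values and the app file name 'fault_tolerance_ctrl.py' are
# placeholders, not taken from the original project.
import os
import subprocess

env = dict(os.environ)
env.update({
    'delta_5': '0.5',          # example timeout values in seconds (assumed)
    'delta_6': '0.2',
    'delta_7': '0.1',
    'REALIZATIONS_NUM': '10',  # example number of experiment realizations (assumed)
})
subprocess.call(['ryu-manager', 'fault_tolerance_ctrl.py'], env=env)
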
# Variant of the constructor that sweeps several delta_6 detection timeouts
# (one (delta_6, delta_7, delta_5) triple per value in DELTA_6_VALUES) instead
# of a single triple.
def __init__(self, *args, **kwargs):
    super(OpenStateFaultTolerance, self).__init__(*args, **kwargs)
    DELTA_6_VALUES = eval(os.environ['DELTA_6_VALUES'])  # ugly and dangerous!
    delta_7 = float(os.environ['delta_7'])
    delta_5 = float(os.environ['delta_5'])
    f_t_parser.detection_timeouts_list = [(x, delta_7, delta_5) for x in DELTA_6_VALUES]
    results_hash = f_t_parser.md5sum_results()
    if f_t_parser.network_has_changed(results_hash):
        f_t_parser.erase_figs_folder()
    (self.requests, self.faults) = f_t_parser.parse_ampl_results_if_not_cached()
    print len(self.requests), 'requests loaded'
    print len(self.faults), 'faults loaded'
    print "Building network graph from network.xml..."
    # G is a NetworkX Graph object
    (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml()
    print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges() - len(self.hosts), 'links and', len(self.hosts), 'hosts'
    print "NetworkX to Mininet topology conversion..."
    # mn_topo is a Mininet Topo object
    self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
    # mn_net is a Mininet object
    self.mn_net = f_t_parser.create_mininet_net(self.mn_topo)
    f_t_parser.launch_mininet(self.mn_net)
    self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)
    f_t_parser.mn_setup_MAC_and_IP(self.mn_net)
    f_t_parser.mn_setup_static_ARP_entries(self.mn_net)
    f_t_parser.draw_network_topology(self.G, self.pos, self.ports_dict, self.hosts)
    (self.fault_ID, self.flow_entries_dict,
     self.flow_entries_with_timeout_dict,
     self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
        self.requests, self.faults, self.ports_dict,
        match_flow=f_t_parser.get_mac_match_mininet,
        check_cache=False)
    # Associates dp_id to datapath object
    self.dp_dictionary = dict()
    # Associates dp_id to a dict associating port<->MAC address
    self.ports_mac_dict = dict()
    # Needed by fault_tolerance_rest
    self.f_t_parser = f_t_parser
    # switch counter
    self.switch_count = 0

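# --- Safer parsing sketch --------------------------------------------------------
# The eval() call above (flagged "ugly and dangerous!" in the original comment)
# executes arbitrary code taken from the environment. If DELTA_6_VALUES is
# expected to be a plain list literal such as "[0.1, 0.2, 0.5]" (an assumption,
# not confirmed by the source), ast.literal_eval accepts the same input while
# rejecting anything that is not a literal.
import ast
import os

def parse_delta_6_values(var_name='DELTA_6_VALUES'):
    raw = os.environ[var_name]
    values = ast.literal_eval(raw)  # only Python literals are accepted
    if not isinstance(values, (list, tuple)):
        raise ValueError('%s must be a list/tuple literal, got %r' % (var_name, raw))
    return [float(v) for v in values]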