Example #1
# Worker: load precomputed AMPL results for net_name, generate the corresponding flow entries, and publish per-switch flow-count stats (min/avg/max) on out_q.
def process_network_AMPL_model(net_name,out_q):
	filename_res=topology_dir+"/results.txt."+net_name
	filename_net=topology_dir+"/network.xml."+net_name
	(G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net)
	(requests,faults) = f_t_parser.parse_ampl_results(filename=filename_res)
	print len(requests), 'requests loaded'
	print len(faults), 'faults loaded'
	print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts'
	mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
	ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

	print "\n# Smart instance "+net_name+" with results from AMPL model..."
	(fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename_res,confirm_cache_loading=False,dpctl_script=False)
	
	flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
	tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global']
	# print 'min', min(tot_flows)
	# print 'avg', sum(tot_flows)/float(len(tot_flows))
	# print 'max', max(tot_flows)

	D = len(requests)
	F = len(faults)
	print 'O(D*F) = %d*%d = %d'%(D,F,D*F)
	stats = [net_name+" AMPL model", {'min': min(tot_flows), 'avg': sum(tot_flows)/float(len(tot_flows)), 'max': max(tot_flows)}]
	out_q.put(stats)
	with open("tmp/"+str(net_name)+" AMPL model.txt", "a+") as out_file:
		out_file.write(str(stats)+"\n")
	return stats
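The out_q parameter suggests each worker is meant to run in its own process and report back through a shared queue. A minimal driver sketch for this example (an assumption about intended usage, not part of the original listing):

# Driver sketch (assumed usage): run the worker in its own process and collect
# the [label, {'min': ..., 'avg': ..., 'max': ...}] stats it puts on the queue.
from multiprocessing import Process, Queue

def run_ampl_worker(net_name):
    out_q = Queue()
    p = Process(target=process_network_AMPL_model, args=(net_name, out_q))
    p.start()
    stats = out_q.get()  # blocks until the worker publishes its stats
    p.join()
    return stats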
Example #2
# Worker: compute end-to-end protected paths for net_name with execute_instance(), generate the corresponding flow entries, and publish per-switch flow-count stats (min/avg/max) on out_q.
def process_network_E2E_PP(net_name,out_q):
	filename_res=topology_dir+"/results.txt."+net_name
	filename_net=topology_dir+"/network.xml."+net_name
	(G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net)
	(requests,faults) = f_t_parser.parse_ampl_results(filename=filename_res)
	print len(requests), 'requests loaded'
	print len(faults), 'faults loaded'
	print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts'
	mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
	ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

	print "\n# Dumb instance "+net_name+" with end-to-end path protection (bp_node_disj=True...)"
	# requests is used only for its keys; the primary and backup paths are computed by execute_instance()
	demands = {dem : 1 for dem in requests.keys() }
	N = G.number_of_edges()-len(hosts)
	G_dir = G.to_directed()
	for e in G_dir.edges():
		G_dir.edge[e[0]][e[1]] = {'capacity': N*N*10}
	fc = execute_instance(G_dir, demands, bp_node_disj=True)
	(requests_E2E,faults_E2E) = create_requests_faults_dict(fc.pps,fc.bps)
	(fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests_E2E,faults_E2E,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename_res+"E2E",confirm_cache_loading=False,dpctl_script=False)

	flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
	tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global']
	# print 'min', min(tot_flows)
	# print 'avg', sum(tot_flows)/float(len(tot_flows))
	# print 'max', max(tot_flows)
	stats = [net_name+" E2E PP", {'min': min(tot_flows), 'avg': sum(tot_flows)/float(len(tot_flows)), 'max': max(tot_flows)}]
	out_q.put(stats)
	with open("tmp/"+str(net_name)+" E2E PP.txt", "a+") as out_file:
		out_file.write(str(stats)+"\n")
	return stats
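Note that the capacity-assignment loop above relies on the NetworkX 1.x G_dir.edge accessor, which no longer exists in NetworkX 2.x. A small sketch of the same uniform-capacity assignment against the current API (an adaptation, not from the original listing):

# Sketch: give every directed edge the same 'capacity' attribute using the
# NetworkX 2.x item accessor instead of the removed G.edge dict-of-dicts.
def set_uniform_capacity(G_dir, capacity):
    for u, v in G_dir.edges():
        G_dir[u][v]['capacity'] = capacity
    return G_dir

# e.g. set_uniform_capacity(G.to_directed(), N * N * 10)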
Example #3
# Worker: compute end-to-end protected paths for net_name with execute_instance(), generate the corresponding flow entries, and publish per-switch flow-count stats (min/avg/max) on out_q.
def process_network_E2E_PP(net_name, out_q):
    filename_res = topology_dir + "/results.txt." + net_name
    filename_net = topology_dir + "/network.xml." + net_name
    (G, pos, hosts, switches,
     mapping) = f_t_parser.parse_network_xml(filename=filename_net)
    (requests, faults) = f_t_parser.parse_ampl_results(filename=filename_res)
    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'
    print 'Network has', len(switches), 'switches,', G.number_of_edges() - len(
        hosts), 'links and', len(hosts), 'hosts'
    mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
    ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

    print "\n# Dumb instance " + net_name + " with end-to-end path protection (bp_node_disj=True...)"
    # requests is used only for its keys; the primary and backup paths are computed by execute_instance()
    demands = {dem: 1 for dem in requests.keys()}
    N = G.number_of_edges() - len(hosts)
    G_dir = G.to_directed()
    for e in G_dir.edges():
        G_dir.edge[e[0]][e[1]] = {'capacity': N * N * 10}
    fc = execute_instance(G_dir, demands, bp_node_disj=True)
    (requests_E2E, faults_E2E) = create_requests_faults_dict(fc.pps, fc.bps)
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
         requests_E2E,
         faults_E2E,
         ports_dict,
         match_flow=f_t_parser.get_mac_match_mininet,
         check_cache=False,
         filename=filename_res + "E2E",
         confirm_cache_loading=False,
         dpctl_script=False)

    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [
        flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys()
        if node != 'global'
    ]
    # print 'min', min(tot_flows)
    # print 'avg', sum(tot_flows) / float(len(tot_flows))
    # print 'max', max(tot_flows)
    stats = [
        net_name + " E2E PP", {
            'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len(tot_flows)),
            'max': max(tot_flows)
        }
    ]
    out_q.put(stats)
    with open("tmp/" + str(net_name) + " E2E PP.txt", "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats
Example #4
# Worker: load precomputed AMPL results for net_name, generate the corresponding flow entries, and publish per-switch flow-count stats (min/avg/max) on out_q.
def process_network_AMPL_model(net_name, out_q):
    filename_res = topology_dir + "/results.txt." + net_name
    filename_net = topology_dir + "/network.xml." + net_name
    (G, pos, hosts, switches,
     mapping) = f_t_parser.parse_network_xml(filename=filename_net)
    (requests, faults) = f_t_parser.parse_ampl_results(filename=filename_res)
    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'
    print 'Network has', len(switches), 'switches,', G.number_of_edges() - len(
        hosts), 'links and', len(hosts), 'hosts'
    mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
    ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

    print "\n# Smart instance " + net_name + " with results from AMPL model..."
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
         requests,
         faults,
         ports_dict,
         match_flow=f_t_parser.get_mac_match_mininet,
         check_cache=False,
         filename=filename_res,
         confirm_cache_loading=False,
         dpctl_script=False)

    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [
        flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys()
        if node != 'global'
    ]
    # print 'min', min(tot_flows)
    # print 'avg', sum(tot_flows) / float(len(tot_flows))
    # print 'max', max(tot_flows)

    D = len(requests)
    F = len(faults)
    print 'O(D*F) = %d*%d = %d' % (D, F, D * F)
    stats = [
        net_name + " AMPL model", {
            'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len(tot_flows)),
            'max': max(tot_flows)
        }
    ]
    out_q.put(stats)
    with open("tmp/" + str(net_name) + " AMPL model.txt", "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats
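All four workers repeat the same min/avg/max summary over the per-switch 'tot_flows' counters. A small helper that factors it out (a refactoring sketch, assuming the flow_stats_dict layout used above, with an aggregate 'global' entry next to the per-node entries):

# Sketch: per-switch flow-count summary shared by the workers above,
# skipping the aggregate 'global' entry of flow_stats_dict.
def summarize_tot_flows(flow_stats_dict):
    tot_flows = [entry['tot_flows']
                 for node, entry in flow_stats_dict.items()
                 if node != 'global']
    return {'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len(tot_flows)),
            'max': max(tot_flows)}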