Code Example #1
File: table2.py Project: minicz/spider
def process_network_AMPL_model(net_name,out_q):
	filename_res=topology_dir+"/results.txt."+net_name
	filename_net=topology_dir+"/network.xml."+net_name
	(G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net)
	(requests,faults) = f_t_parser.parse_ampl_results(filename=filename_res)
	print len(requests), 'requests loaded'
	print len(faults), 'faults loaded'
	print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts'
	mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
	ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

	print "\n# Smart instance "+net_name+" with results from AMPL model..."
	(fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename_res,confirm_cache_loading=False,dpctl_script=False)
	
	flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
	tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global']
	'''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''

	D = len(requests)
	F = len(faults)
	print 'O(D*F) = %d*%d = %d'%(D,F,D*F)
	stats = [net_name+" AMPL model",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}]
	out_q.put(stats)
	with open("tmp/"+str(net_name)+" AMPL model.txt", "a+") as out_file:
		out_file.write(str(stats)+"\n")
	return stats
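
All of the process_* functions in these examples take an out_q parameter and push a [label, {'min': ..., 'avg': ..., 'max': ...}] entry onto it before returning, so they appear intended to run as worker processes. The following driver is a minimal sketch under that assumption; the run_worker helper and the example network name are illustrative and not part of the repository.

# Hypothetical driver (Python 2, matching the examples above): run one of the
# process_* workers in a child process and collect its stats from a queue.
from multiprocessing import Process, Queue

def run_worker(target, *args):
    out_q = Queue()
    p = Process(target=target, args=args + (out_q,))
    p.start()
    label, flows = out_q.get()  # e.g. ['<net> AMPL model', {'min': ..., 'avg': ..., 'max': ...}]
    p.join()
    print '%s: min=%d avg=%.1f max=%d' % (label, flows['min'], flows['avg'], flows['max'])
    return label, flows

# Example call (hypothetical network name):
# run_worker(process_network_AMPL_model, 'some_net_name')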
Code Example #2
File: table2.py Project: minicz/spider
def process_network_E2E_PP(net_name,out_q):
	filename_res=topology_dir+"/results.txt."+net_name
	filename_net=topology_dir+"/network.xml."+net_name
	(G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net)
	(requests,faults) = f_t_parser.parse_ampl_results(filename=filename_res)
	print len(requests), 'requests loaded'
	print len(faults), 'faults loaded'
	print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts'
	mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
	ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

	print "\n# Dumb instance "+net_name+" with end-to-end path protection (bp_node_disj=True...)"
	# we take requests just for its keys, but primary/backup paths are calculated by execute_instance()
	demands = {dem : 1 for dem in requests.keys() }
	N = G.number_of_edges()-len(hosts)
	G_dir = G.to_directed()
	for e in G_dir.edges():
		G_dir.edge[e[0]][e[1]] = {'capacity': N*N*10}
	fc = execute_instance(G_dir, demands, bp_node_disj=True)
	(requests_E2E,faults_E2E) = create_requests_faults_dict(fc.pps,fc.bps)
	(fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests_E2E,faults_E2E,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename_res+"E2E",confirm_cache_loading=False,dpctl_script=False)

	flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
	tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global']
	'''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''
	stats = [net_name+" E2E PP",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}]
	out_q.put(stats)
	with open("tmp/"+str(net_name)+" E2E PP.txt", "a+") as out_file:
		out_file.write(str(stats)+"\n")
	return stats
Code Example #3
File: table2.py Project: rubiruchi/spider
def process_NxN_greedy(N, out_q):
    G, demands = create_square_network(N,
                                       link_capacity=N * N * 10,
                                       demand_volume=1)
    print "\n# Smart instance " + str(N) + "x" + str(
        N) + " with link cost function and bp_node_disj=False..."
    fc = execute_instance(G, demands, cost_func=cost_func_inv)
    ports_dict = create_ports_dict(G, demands)
    (requests, faults) = create_requests_faults_dict(fc.pps, fc.bps)
    # fictitious filename, just for caching purposes
    filename = str(N) + 'X' + str(N) + 'greedy.txt'
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
         requests,
         faults,
         ports_dict,
         match_flow=f_t_parser.get_mac_match_mininet,
         check_cache=False,
         filename=filename,
         confirm_cache_loading=False,
         dpctl_script=False)

    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [
        flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys()
        if node != 'global'
    ]
    '''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''

    D = len(demands)
    F = len(G.edges()) / 2 if isinstance(G, nx.DiGraph) else len(G.edges())
    print 'O(D*F) = %d*%d = %d' % (D, F, D * F)
    stats = [
        str(N) + "x" + str(N) + " greedy", {
            'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len((tot_flows))),
            'max': max(tot_flows)
        }
    ]
    out_q.put(stats)
    with open("tmp/" + str(N) + "x" + str(N) + " greedy.txt",
              "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats
Code Example #4
File: SPIDER_ctrl.py Project: OpenState-SDN/spider
    def configure_stateful_stages(self, datapath):
        node_dict = SPIDER_parser.create_node_dict(self.ports_dict,self.requests)

        self.send_table_mod(datapath, table_id=2)
        self.send_key_lookup(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])
        self.send_key_update(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])

        self.send_table_mod(datapath, table_id=3)
        self.send_key_lookup(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])
        self.send_key_update(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])
Code Example #5
File: table2.py Project: rubiruchi/spider
def process_NxN_E2E_PP(N, out_q):
    G, demands = create_square_network(N,
                                       link_capacity=N * N * 10,
                                       demand_volume=1)
    print "\n# Dumb instance " + str(N) + "x" + str(
        N) + " with end-to-end path protection (bp_node_disj=True...)"
    fc = execute_instance(G, demands, bp_node_disj=True)
    ports_dict = create_ports_dict(G, demands)
    (requests, faults) = create_requests_faults_dict(fc.pps, fc.bps)
    # fictitious filename, just for caching purposes
    filename = str(N) + 'X' + str(N) + 'E2E.txt'
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
         requests,
         faults,
         ports_dict,
         match_flow=f_t_parser.get_mac_match_mininet,
         check_cache=False,
         filename=filename,
         confirm_cache_loading=False,
         dpctl_script=False)

    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [
        flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys()
        if node != 'global'
    ]
    '''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''
    stats = [
        str(N) + "x" + str(N) + " E2E PP", {
            'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len((tot_flows))),
            'max': max(tot_flows)
        }
    ]
    out_q.put(stats)
    with open("tmp/" + str(N) + "x" + str(N) + " E2E PP.txt",
              "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats
Code Example #6
File: table2.py Project: rubiruchi/spider
def process_network_E2E_PP(net_name, out_q):
    filename_res = topology_dir + "/results.txt." + net_name
    filename_net = topology_dir + "/network.xml." + net_name
    (G, pos, hosts, switches,
     mapping) = f_t_parser.parse_network_xml(filename=filename_net)
    (requests, faults) = f_t_parser.parse_ampl_results(filename=filename_res)
    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'
    print 'Network has', len(switches), 'switches,', G.number_of_edges() - len(
        hosts), 'links and', len(hosts), 'hosts'
    mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
    ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

    print "\n# Dumb instance " + net_name + " with end-to-end path protection (bp_node_disj=True...)"
    # we take requests just for its keys, but primary/backup paths are calculated by execute_instance()
    demands = {dem: 1 for dem in requests.keys()}
    N = G.number_of_edges() - len(hosts)
    G_dir = G.to_directed()
    for e in G_dir.edges():
        G_dir.edge[e[0]][e[1]] = {'capacity': N * N * 10}
    fc = execute_instance(G_dir, demands, bp_node_disj=True)
    (requests_E2E, faults_E2E) = create_requests_faults_dict(fc.pps, fc.bps)
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
         requests_E2E,
         faults_E2E,
         ports_dict,
         match_flow=f_t_parser.get_mac_match_mininet,
         check_cache=False,
         filename=filename_res + "E2E",
         confirm_cache_loading=False,
         dpctl_script=False)

    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [
        flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys()
        if node != 'global'
    ]
    '''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''
    stats = [
        net_name + " E2E PP", {
            'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len((tot_flows))),
            'max': max(tot_flows)
        }
    ]
    out_q.put(stats)
    with open("tmp/" + str(net_name) + " E2E PP.txt", "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats
Code Example #7
File: fig7_ryu_app.py Project: rubiruchi/spider
    def configure_stateful_stages(self, datapath):
        node_dict = f_t_parser.create_node_dict(self.ports_dict, self.requests)

        self.send_table_mod(datapath, table_id=2)
        self.send_key_lookup(
            datapath,
            table_id=2,
            fields=[ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST])
        self.send_key_update(
            datapath,
            table_id=2,
            fields=[ofproto.OXM_OF_ETH_SRC, ofproto.OXM_OF_ETH_DST])

        self.send_table_mod(datapath, table_id=3)
        self.send_key_lookup(datapath,
                             table_id=3,
                             fields=[ofproto.OXM_OF_METADATA])
        self.send_key_update(datapath,
                             table_id=3,
                             fields=[ofproto.OXM_OF_METADATA])
Code Example #8
File: table2.py Project: minicz/spider
def process_NxN_E2E_PP(N,out_q):
	G, demands = create_square_network(N, link_capacity=N*N*10, demand_volume=1)
	print "\n# Dumb instance "+str(N)+"x"+str(N)+" with end-to-end path protection (bp_node_disj=True...)"
	fc = execute_instance(G, demands, bp_node_disj=True)
	ports_dict = create_ports_dict(G, demands)
	(requests,faults) = create_requests_faults_dict(fc.pps,fc.bps)
	# fictitious filename, just for caching purposes
	filename=str(N)+'X'+str(N)+'E2E.txt'
	(fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename,confirm_cache_loading=False,dpctl_script=False)
	
	flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
	tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global']
	'''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''
	stats = [str(N)+"x"+str(N)+" E2E PP",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}]
	out_q.put(stats)
	with open("tmp/"+str(N)+"x"+str(N)+" E2E PP.txt", "a+") as out_file:
		out_file.write(str(stats)+"\n")
	return stats
Code Example #9
File: table2.py Project: rubiruchi/spider
def process_network_AMPL_model(net_name, out_q):
    filename_res = topology_dir + "/results.txt." + net_name
    filename_net = topology_dir + "/network.xml." + net_name
    (G, pos, hosts, switches,
     mapping) = f_t_parser.parse_network_xml(filename=filename_net)
    (requests, faults) = f_t_parser.parse_ampl_results(filename=filename_res)
    print len(requests), 'requests loaded'
    print len(faults), 'faults loaded'
    print 'Network has', len(switches), 'switches,', G.number_of_edges() - len(
        hosts), 'links and', len(hosts), 'hosts'
    mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping)
    ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports)

    print "\n# Smart instance " + net_name + " with results from AMPL model..."
    (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict,
     flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(
         requests,
         faults,
         ports_dict,
         match_flow=f_t_parser.get_mac_match_mininet,
         check_cache=False,
         filename=filename_res,
         confirm_cache_loading=False,
         dpctl_script=False)

    flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
    tot_flows = [
        flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys()
        if node != 'global'
    ]
    '''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''

    D = len(requests)
    F = len(faults)
    print 'O(D*F) = %d*%d = %d' % (D, F, D * F)
    stats = [
        net_name + " AMPL model", {
            'min': min(tot_flows),
            'avg': sum(tot_flows) / float(len((tot_flows))),
            'max': max(tot_flows)
        }
    ]
    out_q.put(stats)
    with open("tmp/" + str(net_name) + " AMPL model.txt", "a+") as out_file:
        out_file.write(str(stats) + "\n")
    return stats
Code Example #10
File: table2.py Project: minicz/spider
def process_NxN_greedy(N,out_q):
	G, demands = create_square_network(N, link_capacity=N*N*10, demand_volume=1)	
	print "\n# Smart instance "+str(N)+"x"+str(N)+" with link cost function and bp_node_disj=False..."
	fc = execute_instance(G, demands, cost_func=cost_func_inv)
	ports_dict = create_ports_dict(G, demands)
	(requests,faults) = create_requests_faults_dict(fc.pps,fc.bps)
	# fictitious filename, just for caching purposes
	filename=str(N)+'X'+str(N)+'greedy.txt'
	(fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename,confirm_cache_loading=False,dpctl_script=False)

	flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict)
	tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global']
	'''print 'min',min(tot_flows)
	print 'avg',sum(tot_flows)/float(len((tot_flows)))
	print 'max',max(tot_flows)'''
	
	D = len(demands)
	F = len(G.edges())/2 if isinstance(G,nx.DiGraph) else len(G.edges())
	print 'O(D*F) = %d*%d = %d'%(D,F,D*F)
	stats = [str(N)+"x"+str(N)+" greedy",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}]
	out_q.put(stats)
	with open("tmp/"+str(N)+"x"+str(N)+" greedy.txt", "a+") as out_file:
		out_file.write(str(stats)+"\n")
	return stats
Code Example #11
File: fig7_ryu_app.py Project: rubiruchi/spider
    def __init__(self, *args, **kwargs):
        super(OpenStateFaultTolerance, self).__init__(*args, **kwargs)

        DELTA_6_VALUES = eval(
            os.environ['DELTA_6_VALUES'])  # ugly and dangerous!
        delta_7 = float(os.environ['delta_7'])
        delta_5 = float(os.environ['delta_5'])
        f_t_parser.detection_timeouts_list = [(x, delta_7, delta_5)
                                              for x in DELTA_6_VALUES]

        results_hash = f_t_parser.md5sum_results()
        if f_t_parser.network_has_changed(results_hash):
            f_t_parser.erase_figs_folder()

        (self.requests,
         self.faults) = f_t_parser.parse_ampl_results_if_not_cached()

        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'

        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches,
         self.mapping) = f_t_parser.parse_network_xml()
        print 'Network has', len(
            self.switches), 'switches,', self.G.number_of_edges() - len(
                self.hosts), 'links and', len(self.hosts), 'hosts'

        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = f_t_parser.networkx_to_mininet_topo(
            self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = f_t_parser.create_mininet_net(self.mn_topo)

        f_t_parser.launch_mininet(self.mn_net)

        self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(
            self.mn_topo.ports)

        f_t_parser.mn_setup_MAC_and_IP(self.mn_net)

        f_t_parser.mn_setup_static_ARP_entries(self.mn_net)

        f_t_parser.draw_network_topology(self.G, self.pos, self.ports_dict,
                                         self.hosts)

        (self.fault_ID, self.flow_entries_dict,
         self.flow_entries_with_timeout_dict, self.flow_entries_with_burst_dict
         ) = f_t_parser.generate_flow_entries_dict(
             self.requests,
             self.faults,
             self.ports_dict,
             match_flow=f_t_parser.get_mac_match_mininet,
             check_cache=False)

        # Associates dp_id to datapath object
        self.dp_dictionary = dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict = dict()

        # Needed by fault_tolerance_rest
        self.f_t_parser = f_t_parser

        # switch counter
        self.switch_count = 0
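
The eval(os.environ['DELTA_6_VALUES']) call in Code Example #11 is flagged by its own comment as "ugly and dangerous!". A safer variant, sketched here rather than taken from the repository, parses the environment variable with ast.literal_eval, which evaluates only Python literals:

# Sketch: parse DELTA_6_VALUES without eval(); ast.literal_eval accepts only
# literals (lists, tuples, numbers, strings), never arbitrary expressions.
import ast
import os

raw = os.environ.get('DELTA_6_VALUES', '[]')
DELTA_6_VALUES = ast.literal_eval(raw)
if not isinstance(DELTA_6_VALUES, (list, tuple)):
    raise ValueError('DELTA_6_VALUES must be a list of delta_6 timeout values')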
Code Example #12
File: SPIDER_ctrl.py Project: OpenState-SDN/spider
    def __init__(self, *args, **kwargs):
        super(SPIDER, self).__init__(*args, **kwargs)

        results_hash = SPIDER_parser.md5sum_results()
        if SPIDER_parser.network_has_changed(results_hash):
            SPIDER_parser.erase_figs_folder()

        (self.requests,self.faults) = SPIDER_parser.parse_ampl_results_if_not_cached()

        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'

        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches, self.mapping) = SPIDER_parser.parse_network_xml()
        print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts'

        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = SPIDER_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = SPIDER_parser.create_mininet_net(self.mn_topo)

        SPIDER_parser.launch_mininet(self.mn_net)

        self.ports_dict = SPIDER_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)

        SPIDER_parser.mn_setup_MAC_and_IP(self.mn_net)

        SPIDER_parser.mn_setup_static_ARP_entries(self.mn_net)

        SPIDER_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts)

        (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_detection_timeouts_dict, self.flow_entries_with_flowlet_timeouts_dict) = SPIDER_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=SPIDER_parser.get_mac_match_mininet,check_cache=True,dpctl_script=True)

        #SPIDER_parser.print_flow_stats(SPIDER_parser.get_flow_stats_dict(self.flow_entries_dict))

        # Associates dp_id to datapath object
        self.dp_dictionary=dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict=dict()

        # Needed by SPIDER_ctrl_REST
        self.SPIDER_parser = SPIDER_parser
Code Example #13
    def __init__(self, *args, **kwargs):
        super(OpenStateFaultTolerance, self).__init__(*args, **kwargs)

        delta_6 = float(os.environ['delta_6'])
        delta_7 = float(os.environ['delta_7'])
        delta_5 = float(os.environ['delta_5'])
        f_t_parser.detection_timeouts_list = [(delta_6,delta_7,delta_5)]

        self.REALIZATIONS_NUM = int(os.environ['REALIZATIONS_NUM'])

        results_hash = f_t_parser.md5sum_results()
        if f_t_parser.network_has_changed(results_hash):
            f_t_parser.erase_figs_folder()

        (self.requests,self.faults) = f_t_parser.parse_ampl_results_if_not_cached()

        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'

        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml()
        print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts'

        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = f_t_parser.create_mininet_net(self.mn_topo)

        f_t_parser.launch_mininet(self.mn_net)

        self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)

        f_t_parser.mn_setup_MAC_and_IP(self.mn_net)

        f_t_parser.mn_setup_static_ARP_entries(self.mn_net)

        f_t_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts)

        (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_timeout_dict, self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False)

        # Associates dp_id to datapath object
        self.dp_dictionary=dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict=dict()

        # Needed by fault_tolerance_rest
        self.f_t_parser = f_t_parser

        # switch counter
        self.switch_count = 0
Code Example #14
    def __init__(self, *args, **kwargs):
        super(SPIDER, self).__init__(*args, **kwargs)

        results_hash = SPIDER_parser.md5sum_results()
        if SPIDER_parser.network_has_changed(results_hash):
            SPIDER_parser.erase_figs_folder()

        (self.requests,
         self.faults) = SPIDER_parser.parse_ampl_results_if_not_cached()

        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'

        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches,
         self.mapping) = SPIDER_parser.parse_network_xml()
        print 'Network has', len(
            self.switches), 'switches,', self.G.number_of_edges() - len(
                self.hosts), 'links and', len(self.hosts), 'hosts'

        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = SPIDER_parser.networkx_to_mininet_topo(
            self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = SPIDER_parser.create_mininet_net(self.mn_topo)

        SPIDER_parser.launch_mininet(self.mn_net)

        self.ports_dict = SPIDER_parser.adapt_mn_topo_ports_to_old_API(
            self.mn_topo.ports)

        SPIDER_parser.mn_setup_MAC_and_IP(self.mn_net)

        SPIDER_parser.mn_setup_static_ARP_entries(self.mn_net)

        SPIDER_parser.draw_network_topology(self.G, self.pos, self.ports_dict,
                                            self.hosts)

        (self.fault_ID, self.flow_entries_dict,
         self.flow_entries_with_detection_timeouts_dict,
         self.flow_entries_with_flowlet_timeouts_dict
         ) = SPIDER_parser.generate_flow_entries_dict(
             self.requests,
             self.faults,
             self.ports_dict,
             match_flow=SPIDER_parser.get_mac_match_mininet,
             check_cache=True,
             dpctl_script=True)

        #SPIDER_parser.print_flow_stats(SPIDER_parser.get_flow_stats_dict(self.flow_entries_dict))

        # Associates dp_id to datapath object
        self.dp_dictionary = dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict = dict()

        # Needed by SPIDER_ctrl_REST
        self.SPIDER_parser = SPIDER_parser