def __init__(self, postfix):
    """Set up path monitoring and open the CSV output file.

    postfix: experiment identifier embedded in the output file name.
    """
    def startup():
        # One priority lower than the discovery module, although it should
        # not matter.
        core.openflow.addListeners(self, priority=0xfffffffe)
        core.opennetmon_forwarding.addListeners(self)  # ("NewPath")
        self.decreaseTimer = False
        self.increaseTimer = False
        self.t = Timer(1, self._timer_MonitorPaths, recurring=True)
        self.f = open("output.%s.csv" % postfix, "w")
        self.experiment = postfix
    # Wait for opennetmon-forwarding to be started.
    core.call_when_ready(startup, ('opennetmon_forwarding'))
def launch():
    """ Starts the component """
    links = {}

    def start_switch(event):
        # A new switch connected: wrap it in a Tutorial controller.
        log.debug("Controlling %s" % (event.connection,))
        Tutorial(event.connection, event.dpid)

    def _handle_BarrierIn(self, event):
        return

    # On link addition, update network representation
    def getTopo(event):
        l = event.link
        netGraph.add_edge(l.dpid1, l.dpid2)
        adjacency[l.dpid1][l.dpid2] = l.port1
        adjacency[l.dpid2][l.dpid1] = l.port2

    def startup():
        core.openflow.addListenerByName("ConnectionUp", start_switch)
        core.openflow_discovery.addListenerByName("LinkEvent", getTopo)

    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def messenger_service():
    """Bring up a TCP messenger transport once MessengerNexus is ready."""
    def start():
        transport = TCPTransport('0.0.0.0', '7790')
        transport.start()
    core.call_when_ready(start, "MessengerNexus", __name__)
    Messenger()
def __init__(self):
    """Defer listener registration until the required modules are up."""
    def startup():
        core.openflow.addListeners(self, priority=0)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ("openflow", "openflow_discovery"))
def __init__(self, postfix):
    """Initialise monitoring; postfix tags the CSV output files."""
    log.debug("Monitoring coming up")
    def startup():
        # One priority lower than the discovery module, although it should
        # not matter.
        core.openflow.addListeners(self, priority=0xfffffffe)
        core.opennetmon_forwarding.addListeners(self)  # ("NewPath")
        self.decreaseTimer = False
        self.increaseTimer = False
        self.t = Timer(1, self._timer_MonitorPaths, recurring=True)
        self.f = open("output.%s.csv" % postfix, "w")
        self.f2 = open("delay.%s.csv" % postfix, "w")
        self.f2.write("MeasurementType,Src/Initiator,Dst/Switch,Delay\n")
        self.f2.flush()
        self.experiment = postfix
        log.debug("Monitoring started")
    # Wait for opennetmon-forwarding to be started.
    core.call_when_ready(startup, ('opennetmon_forwarding'))
def __init__(self, postfix):
    """Initialise monitoring with periodic flow reporting to ECMP.

    postfix: experiment identifier embedded in output file names.
    """
    log.debug("Monitoring coming up")
    def startup():
        # One priority lower than the discovery module, although it should
        # not matter.
        core.openflow.addListeners(self, priority=0xfffffffe)
        self.decreaseTimer = False
        self.increaseTimer = False
        self.t = Timer(1, self._timer_MonitorPaths, recurring=True)
        self.f = open("output.%s.csv" % postfix, "w")
        self.f.flush()
        self.f2 = open("delay.%s.csv" % postfix, "w")
        self.f2.write("MeasurementType,Src/Initiator,Dst/Switch,Delay\n")
        self.f2.flush()
        self.experiment = postfix
        self.toSend = 0
        self.flows = []
        self.flowSendTimer = Timer(20, self._send_flowsToEcmp, recurring=True)
        self.bw = 0.01  # 10 Mbps WRT 1Gbps. Here 1Gbps is 1
        log.debug("Monitoring started")
    # Wait for openflow_discovery to be started.
    core.call_when_ready(startup, ('openflow_discovery'))
def __init__(self):
    self.connections = set()
    self.topology = Topology()
    # Wait until the openflow and openflow_discovery modules are ready.
    core.call_when_ready(self.startup, ('openflow', 'openflow_discovery'))
def __init__(self):
    """Register listeners on readiness and reset the global link list."""
    global LinkList
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
    LinkList = []
def __init__(self, transparent):
    """Store the transparency flag and register listeners when ready."""
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
        core.host_tracker.addListeners(self)
    self.transparent = transparent
    core.call_when_ready(startup, ('openflow', 'openflow_discovery', 'host_tracker'))
def __init__(self):
    """Hook up OpenFlow and discovery listeners once both are available."""
    def startup():
        core.openflow.addListeners(self, priority=0)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def __init__(self, link_weight_type, static_link_weight, util_link_weight, flow_replacement_mode, flow_replacement_interval):
    """Initialise multicast path management state and weighting parameters.

    link_weight_type: strategy used when weighting topology links.
    static_link_weight: static component of a link's weight (0 is clamped
        to the smallest positive float so weights never vanish).
    util_link_weight: utilisation-based component of a link's weight.
    flow_replacement_mode: policy for replacing installed flows.
    flow_replacement_interval: replacement period in seconds.
    """
    # Listen to dependencies
    def startup():
        core.openflow.addListeners(self, priority = 99)
        core.openflow_igmp_manager.addListeners(self, priority = 99)
        core.openflow_flow_tracker.addListeners(self, priority = 99)
    self.link_weight_type = link_weight_type
    log.info('Set link weight type: ' + str(self.link_weight_type))
    self.static_link_weight = float(static_link_weight)
    if self.static_link_weight == 0:
        # Avoid a literal zero weight so path computations stay well-defined.
        self.static_link_weight = sys.float_info.min
    self.util_link_weight = float(util_link_weight)
    log.info('Set StaticLinkWeight:' + str(self.static_link_weight) + ' UtilLinkWeight:' + str(self.util_link_weight))
    self.flow_replacement_mode = flow_replacement_mode
    self.flow_replacement_interval = flow_replacement_interval
    log.info('Set FlowReplacementMode:' + str(flow_replacement_mode) + ' FlowReplacementInterval:' + str(flow_replacement_interval) + ' seconds')
    # adjacency[dpid1][dpid2] -> link attribute (None when absent).
    self.adjacency = defaultdict(lambda : defaultdict(lambda : None))
    self.topology_graph = []
    self.node_set = Set()
    self.multicast_paths = defaultdict(lambda : defaultdict(lambda : None))
    self.multicast_paths_by_flow_cookie = {} # Stores references to the same objects as self.multicast_paths, except this map is keyed by flow_cookie
    self._next_mcast_group_cookie = 54345; # Arbitrary, not set to 1 to avoid conflicts with other modules
    # Desired reception state as delivered by the IGMP manager, keyed by the dpid of the router for which
    # the reception state applies
    self.desired_reception_state = defaultdict(lambda : None)
    # Setup listeners
    core.call_when_ready(startup, ('openflow', 'openflow_igmp_manager', 'openflow_flow_tracker'))
def __init__(self):
    """Initialise the ECMP controller's packet-parsing and topology state."""
    # NOTE(review): listeners are registered here AND again inside startup()
    # with priority=0 — this looks like a double registration; confirm intended.
    core.openflow.addListeners(self)
    def startup():
        core.openflow.addListeners(self, priority=0)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
    # Per-PacketIn scratch state, refreshed on each event.
    self.event = None
    self.dpid = None
    self.in_port = None
    self.packet = None
    self.dst_dpid = None
    self.out_port = None
    self.table = ECMPTable()
    # Decoded protocol layers of the current packet.
    self.eth_packet = None
    self.ip_packet = None
    self.arp_packet = None
    self.icmp_packet = None
    self.tcp_packet = None
    self.udp_packet = None
    self.net_packet = None
    self.protocol_packet = None
    self.protocol = None
    # IP -> MAC mapping learned from traffic.
    self.arp_table = {}
    self.is_ip = True
    self.adjacency = {}
    self.host_tracker = host_tracker()
    log.info("controller ready")
def __init__(self, postfix):
    """Initialise monitoring; postfix tags the CSV output files."""
    log.debug("Monitoring coming up")
    def startup():
        # One priority lower than the discovery module, although it should
        # not matter.
        core.openflow.addListeners(self, priority=0xfffffffe)
        self.decreaseTimer = False
        self.increaseTimer = False
        self.t = Timer(1, self._timer_MonitorPaths, recurring=True)
        self.f = open("output.%s.csv" % postfix, "w")
        self.f.write("timeString,dpid,newSource,newDest,newProtocol,packetCount,byteCount,durationSec,durationNanoSec,deltaPacketCount,deltaByteCount,deltaDurationSec,deltaDurationNanoSec,tpSource,tpDest,throughput\n")
        self.f.flush()
        self.f2 = open("delay.%s.csv" % postfix, "w")
        self.f2.write("MeasurementType,Src/Initiator,Dst/Switch,Delay\n")
        self.f2.flush()
        self.experiment = postfix
        log.debug("Monitoring started")
    # Wait for openflow_discovery to be started.
    core.call_when_ready(startup, ('openflow_discovery'))
def launch(setup = 0, num_links = 0, max_link_interval = 0, connected_perc = 50, arp_timeout = 2):
    """Configure topology-tracking globals and register OpenFlow listeners."""
    global setup_time
    global total_num_links
    global link_timeout
    global max_connected_perc
    global arp_cache_timeout
    setup_time = int(setup)
    # Each physical link is discovered in both directions.
    total_num_links = 2 * int(num_links)
    link_timeout = int(max_link_interval)
    max_connected_perc = float(connected_perc) / 100.0
    arp_cache_timeout = int(arp_timeout)
    if total_num_links:
        log.info('Num of links to wait for : {0}'.format(total_num_links))
    if setup_time:
        log.info('Setup time : {0}'.format(setup_time))
    if not total_num_links and not setup_time and not link_timeout:
        link_timeout = 5  # defaulting to link timeout of 5s
    if link_timeout:
        log.info('Max interval between two link discovery events : {0}'.format(link_timeout))
    log.info('Max fraction of links to switches in an edge switch : {0}'.format(max_connected_perc))
    log.info('Max arp cache timeout of a host : {0}'.format(arp_cache_timeout))
    core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
    core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)
    core.call_when_ready(_ready, "openflow_discovery")
    core.Interactive.variables['move'] = move_host
    core.Interactive.variables['move_batch'] = move_batch
def __init__(self): def startup(): core.openflow.addListeners(self, priority=0) core.openflow_discovery.addListeners(self) core.call_when_ready(startup, ('openflow', 'openflow_discovery')) print "init over"
def __init__(self):
    """Listen to OpenFlow now; create ECMP once discovery is ready."""
    core.openflow.addListeners(self)
    def start_me():
        ECMP()
    core.call_when_ready(start_me, "openflow_discovery")
def __init__(self):
    """Start the host-cleanup timer and register listeners when ready."""
    def startup():
        # Time (in seconds) to perform cleanup; generally it should be
        # greater than expected HARD_TIMEOUTs.
        self.HOST_TIMEOUT = 15
        Timer(self.HOST_TIMEOUT, self.host_refresh, recurring=True)
        core.openflow.addListeners(self, priority=0)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def launch():
    """Register flow-profiling handlers once openflow_discovery is ready."""
    def start_flowpr():
        core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
        core.openflow.addListenerByName("ConnectionDown", _handle_ConnectionDown)
        core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)
        core.openflow.addListenerByName("FlowStatsReceived", _handle_flow_stats)
        core.openflow.addListenerByName("QueueStatsReceived", _handle_queue_stats)
        core.openflow.addListenerByName("PortStatsReceived", _handle_port_stats)
        core.openflow.addListenerByName("TableStatsReceived", _handle_table_stats)
        core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
        # Poll flow statistics every 5 seconds.
        Timer(5, request_flowstats, recurring=True)
        #Timer(10, request_portstats, recurring = True)
        log.debug("Flowpr component ready")
    core.call_when_ready(start_flowpr, "openflow_discovery")
def __init__(self):
    self.connections = set()
    self.switches = []
    self.fat_tree = FatTree()
    # Wait until the openflow and openflow_discovery modules are ready.
    core.call_when_ready(self.startup, ('openflow', 'openflow_discovery'))
def launch():
    """Register connection and PacketIn handlers when discovery is ready."""
    def start_flowpr():
        core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
        core.openflow.addListenerByName("ConnectionDown", _handle_ConnectionDown)
        core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
    core.call_when_ready(start_flowpr, "openflow_discovery")
def __init__(self):
    """Register for OpenFlow, discovery, and host-tracker events."""
    log.info("DiscoveryPath has come up")
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
        core.host_tracker.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery', 'host_tracker'))
def __init__(self):
    """Register OpenFlow and discovery listeners when both are ready."""
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
    log.debug("init over")
def __init__(self):
    self.connections = set()
    self.switches = {}
    self.paths = {}
    # Wait until the openflow and openflow_discovery modules are ready.
    core.call_when_ready(self.startup, ('openflow', 'openflow_discovery'))
def launch():
    """Initialise module state and register flow-profiling handlers."""
    __init__()
    def start_flowpr():
        core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
        core.openflow.addListenerByName("ConnectionDown", _handle_ConnectionDown)
        core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)
        core.openflow.addListenerByName("FlowStatsReceived", _handle_flow_stats)
        core.openflow.addListenerByName("QueueStatsReceived", _handle_queue_stats)
        core.openflow.addListenerByName("PortStatsReceived", _handle_port_stats)
        core.openflow.addListenerByName("TableStatsReceived", _handle_table_stats)
        core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
        # Poll flow statistics every 5 seconds.
        Timer(5, request_flowstats, recurring=True)
        #Timer(10, request_portstats, recurring = True)
        log.debug("Flowpr component ready")
    core.call_when_ready(start_flowpr, "openflow_discovery")
def launch(setup=0, num_links=0, max_link_interval=0, connected_perc=50, arp_timeout=2):
    """Configure topology-tracking globals and register OpenFlow listeners."""
    global setup_time
    global total_num_links
    global link_timeout
    global max_connected_perc
    global arp_cache_timeout
    setup_time = int(setup)
    # Each physical link is discovered in both directions.
    total_num_links = 2 * int(num_links)
    link_timeout = int(max_link_interval)
    max_connected_perc = float(connected_perc) / 100.0
    arp_cache_timeout = int(arp_timeout)
    if total_num_links:
        log.info('Num of links to wait for : {0}'.format(total_num_links))
    if setup_time:
        log.info('Setup time : {0}'.format(setup_time))
    if not total_num_links and not setup_time and not link_timeout:
        link_timeout = 5  # defaulting to link timeout of 5s
    if link_timeout:
        log.info('Max interval between two link discovery events : {0}'.format(
            link_timeout))
    log.info(
        'Max fraction of links to switches in an edge switch : {0}'.format(
            max_connected_perc))
    log.info('Max arp cache timeout of a host : {0}'.format(arp_cache_timeout))
    core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
    core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)
    core.call_when_ready(_ready, "openflow_discovery")
    core.Interactive.variables['move'] = move_host
    core.Interactive.variables['move_batch'] = move_batch
def __init__(self):
    """Initialise routing state; defer start until dependencies are up."""
    self.graph = nx.Graph()
    self.output_ports = {}
    self.flows_to_paths = {}
    self.round_robin_counter = 0
    self.hosts_links = {}
    core.call_when_ready(self.start, ('openflow', 'openflow_discovery'))
def launch():
    """Start the FlowVisor pair-learning switch once discovery is ready."""
    def start():
        core.openflow_discovery.addListenerByName("LinkEvent", _handle_links)
        core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
        log.info("FlowVisor Pair-Learning switch running.")
    core.call_when_ready(start, "openflow_discovery")
def launch():
    """Initialise module state and register flow-profiling handlers."""
    __init__()
    def start_flowpr():
        core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
        core.openflow.addListenerByName("ConnectionDown", _handle_ConnectionDown)
        core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)
        core.openflow.addListenerByName("FlowStatsReceived", _handle_flow_stats)
        core.openflow.addListenerByName("QueueStatsReceived", _handle_queue_stats)
        core.openflow.addListenerByName("PortStatsReceived", _handle_port_stats)
        core.openflow.addListenerByName("TableStatsReceived", _handle_table_stats)
        core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
        # Poll flow statistics every 5 seconds.
        Timer(5, request_flowstats, recurring=True)
        #Timer(10, request_portstats, recurring = True)
        log.debug("Flowpr component ready")
    core.call_when_ready(start_flowpr, "openflow_discovery")
def launch(log_name="test", server_isn=None):
    """Set up the simulated-topology scoring run; quits POX when done."""
    run_time = 2
    def setup():
        log = core.getLogger(log_name)
        tester = Tester(log)
        topo = core.sim_topo
        c1 = core.sim_topo.get_node("c1")
        s1 = core.sim_topo.get_node("s1")
        r1 = core.sim_topo.get_node("r1")
        r2 = core.sim_topo.get_node("r2")
        tm = core.sim_topo.time

        def do_score():
            # Verify the survey answer against its published SHA-256 digest.
            secret_word = proj2_survey()
            hashed = hashlib.sha256(secret_word.encode('utf-8')).hexdigest()
            tester.expect_eq(
                "571e437548ffbac2cccfa26d7026aa7bd84186d79ca5ab7a5924d9026359b9e0",
                hashed, "SHA matches")

        def on_end():
            try:
                do_score()
                tester.finish()
            except Exception:
                log.exception("Exception during scoring")
            core.quit()

        tm.set_timer_at(float(run_time), on_end)
    core.call_when_ready(setup, ["sim_topo"], "test")
def launch():
    """Build the network topology model and wire up event listeners."""
    # Instantiate NetworkTopology class
    topo = NetworkTopology()
    # Print empty topology
    topo.printTopology()
    # Start Openflow Discovery, POX Topology, and Openflow Topology modules.
    pox.openflow.discovery.launch()
    pox.topology.launch()
    pox.openflow.topology.launch()
    def addEventListeners():
        # Link add/remove events.
        core.openflow_discovery.addListenerByName("LinkEvent", topo._handle_linkEvent)
        # Switch join/leave events.
        core.topology.addListenerByName("SwitchJoin", topo._handle_newSwitch)
        core.topology.addListenerByName("SwitchLeave", topo._handle_removeSwitch)
        core.openflow.addListenerByName("ConnectionDown", topo._handle_connectionDown)
    # Add event listeners as soon as Openflow Discovery is loaded
    core.call_when_ready(addEventListeners, "openflow_discovery")
def launch(prefix='/topo_conf', gen_dir='../../../tools/gen', topologies=['k5', 'k3', 'r5']):
    """Serve topology configuration over the POX web server.

    NOTE(review): `topologies` is a mutable default argument; acceptable
    here only because launch() runs once per invocation.
    """
    def _launch():
        core.WebServer.set_handler(prefix, TopoConfHandler, topologies)
        core.WebServer.add_static_dir("gen", gen_dir, relative=True)
    core.call_when_ready(_launch, ('WebServer', 'Outband'))
def launch(arg1=True):
    """ Starts a controller. """
    def init():
        core.registerNew(ProactiveRouting, arg1)
    core.call_when_ready(init, ['ARPResponder', 'PortStat'])
def __init__(self):
    #print 'l2_multi'
    # Register listeners once dependencies are ready.
    def startup():
        core.openflow.addListeners(self, priority=0)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def __init__(self):
    self.connections = set()
    #self.switches = Graph()  # adjacency "graph"
    self.switches = {}
    self.paths = {}
    #self.links = {}  # mapping from dpids to ethernet addresses
    # Wait until the openflow and openflow_discovery modules are ready.
    core.call_when_ready(self.startup, ('openflow', 'openflow_discovery'))
def __init__(self, wifi=False):
    """Initialise workspace state; wifi toggles wireless handling."""
    self.wifi = wifi
    self.entities = {}
    self.workspaces = {}
    def startup():
        core.openflow.addListeners(self, priority=0)
        core.openflow_discovery.addListeners(self)
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def launch():
    """Launch discovery, host tracking, and toponizer, then LoopDiscovery."""
    def start_loop_discovery():
        core.registerNew(LoopDiscovery)
    pox.openflow.discovery.launch()
    pox.host_tracker.launch()
    playground.controller.toponizer.launch()
    core.call_when_ready(start_loop_discovery, 'toponizer')
def __init__(self):
    """Register event listeners once openflow and discovery are ready."""
    def start():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
        log.debug("Listeners added")
        # NOTE(review): listen_to_dependencies also auto-wires listeners;
        # combined with the explicit addListeners above this may register
        # handlers twice — confirm intended.
        core.listen_to_dependencies(self)
    core.call_when_ready(start, ("openflow", "openflow_discovery"))
def launch():
    """Start the simple NX switch; requires NX PacketIn conversion."""
    def start():
        if not core.NX.convert_packet_in:
            log.error("PacketIn conversion required")
            return
        core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
        log.info("Simple NX switch running.")
    core.call_when_ready(start, ['NX', 'openflow'])
def launch(no_clear_tables=False):
    """Set the table-clearing policy and hook up the Tk-based UI."""
    global clear_tables_on_change
    clear_tables_on_change = not no_clear_tables
    def start():
        core.openflow.addListenerByName("PacketIn", packet_handler, priority=1)
        core.tk.do(setup)
    core.call_when_ready(start, ['openflow', 'tk'])
def launch(username='', password=''):
    """Register the OpenFlow web-service handler, optionally with auth."""
    def _launch():
        cfg = {}
        # Enable HTTP auth only when both credentials were supplied.
        if len(username) and len(password):
            cfg['auth'] = lambda u, p: (u == username) and (p == password)
        core.WebServer.set_handler("/OF/", OFRequestHandler, cfg, True)
    core.call_when_ready(_launch, ["WebServer", "openflow"], name="openflow.webservice")
def launch(username='', password=''):
    """Register the OpenFlow web-service handler, optionally with auth."""
    def _launch():
        config = {}
        if len(username) and len(password):
            # Simple credential check closure over the launch parameters.
            config['auth'] = lambda u, p: (u == username) and (p == password)
        core.WebServer.set_handler("/OF/", OFRequestHandler, config, True)
    core.call_when_ready(_launch, ["WebServer", "openflow"], name="openflow.webservice")
def __init__(self):
    """Initialise forwarding; registers listeners once dependencies are up."""
    log.debug("Forwarding is initialized")
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
        log.debug("Forwarding started")
    # BUG FIX: dependencies must be passed as one sequence. Previously
    # 'openflow_discovery' was the third positional argument, which POX's
    # call_when_ready(callback, components, name, ...) interprets as the
    # component *name*, so startup() could fire before discovery was ready.
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def launch():
    """Start the simple NX switch; requires NX PacketIn conversion."""
    def start():
        if not core.NX.convert_packet_in:
            log.error("PacketIn conversion required")
            return
        core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
        log.info("Simple NX switch running.")
    core.call_when_ready(start, ['NX', 'openflow'])
def __init__(self):
    """Initialise forwarding; registers listeners once dependencies are up."""
    log.debug("Forwarding is initialized")
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
        log.debug("Forwarding started")
    # BUG FIX: dependencies must be passed as one sequence. Previously
    # 'openflow_discovery' landed in call_when_ready's 'name' parameter,
    # so startup() could run before openflow_discovery was ready.
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def launch(nexus="MessengerNexus"):
    """Attach log bots to the named messenger nexus once it is ready."""
    def start(nexus):
        # One bot for the default log channel.
        real_nexus = core.components[nexus]
        LogBot(real_nexus.get_channel('log'))
        # This will create new channels on demand.
        real_nexus.addListener(ChannelCreate, _handle_new_channel)
    core.call_when_ready(start, nexus, args=[nexus])
def launch(username='', password=''):
    """Register the host-config RPC handler and the HostController."""
    def _launch():
        cfg = {}
        # Enable HTTP auth only when both credentials were supplied.
        if len(username) and len(password):
            cfg['auth'] = lambda u, p: (u == username) and (p == password)
        core.WebServer.set_handler("/hostconfig/", HostRPCHandler, cfg, True)
        core.registerNew(HostController)
    core.call_when_ready(_launch, ["WebServer", "openflow"], name="allegra.host_controller")
def __init__(self, l3_matching):
    """Initialise forwarding.

    l3_matching: whether flows should be matched on L3 fields.
    """
    log.debug("Forwarding coming up")
    def startup():
        core.openflow.addListeners(self)
        core.openflow_discovery.addListeners(self)
        log.debug("Forwarding started")
    self.l3_matching = l3_matching
    # BUG FIX: dependencies must be passed as one sequence. Previously
    # 'openflow_discovery' landed in call_when_ready's 'name' parameter,
    # so startup() could run before openflow_discovery was ready.
    core.call_when_ready(startup, ('openflow', 'openflow_discovery'))
def __init__(self, wifi=False, aggr=False):
    """Initialise workspace state.

    wifi: toggles wireless handling; aggr: toggles aggregation.
    """
    self.wifi = wifi
    self.addrResp = None
    self.aggr = aggr
    self.flow = 0
    self.entities = {}
    self.workspaces = {}
    def startup():
        core.openflow.addListeners(self, priority=0)
    core.call_when_ready(startup, ('openflow'))
def launch (): from pox.openflow.discovery import launch launch() def start_launch (): core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp) core.openflow.addListenerByName("PacketIn", handle_pkt) core.openflow.addListenerByName("SwitchDescReceived", handle_switch_desc) core.openflow.addListenerByName("PortStatsReceived", handle_PortStatsReceived) print "Latency monitor" log.debug("Latency monitor running") core.call_when_ready(start_launch, "openflow_discovery")
def launch(interval=20):
    """Start the keepalive timer with the given period (seconds)."""
    global _interval
    _interval = float(interval)
    def start():
        global _running
        # Guard against a second launch starting a duplicate timer.
        if _running:
            log.error("Keepalive already running")
            return
        _running = True
        Timer(_interval, _handle_timer, recurring=True, args=(core.openflow,))
    core.call_when_ready(start, "openflow", __name__)
def __init__(self):
    """Configures the terminate_benchmarking() method as a handler for SIGINT signals"""
    def startup():
        log.info('Module initialized.')
        # Record the actual initialisation time once dependencies are ready.
        self._module_init_time = time.time()
    # Placeholder until startup() runs — presumably 0 means "not yet
    # initialised"; TODO(review) confirm downstream code treats it that way.
    self._module_init_time = 0
    signal.signal(signal.SIGINT, self.terminate_benchmarking)
    # Setup listeners
    core.call_when_ready(startup, ('openflow', 'openflow_flow_tracker', 'groupflow_event_tracer'))
def launch (): from pox.openflow.discovery import launch launch() def start_launch (): core.registerNew(l2_multi) core.openflow.addListenerByName("SwitchDescReceived", handle_switch_desc) core.openflow.addListenerByName("QueueStatsReceived", handle_QueueStatsReceived) print "Proto-x" log.debug("Latency monitor running") GetTopologyParams() core.call_when_ready(start_launch, "openflow_discovery")