def launch():
  if core.hasComponent('discovery'):
    component = ArpResponse()
    core.register('arp_response', component)
    log.debug("ARP_RESPONSE: arp_response component registered")
  else:
    log.debug("ARP_RESPONSE: arp_response *not* loaded. Missing dependencies")
def launch (default_arbiter=True):
  from pox.core import core
  if core.hasComponent("openflow"):
    return
  if default_arbiter:
    core.registerNew(OpenFlowConnectionArbiter)
  core.register("openflow", OpenFlowNexus())
def launch (port=6633, address="0.0.0.0", name=None,
            private_key=None, certificate=None, ca_cert=None,
            __INSTANCE__=None):
  """
  Start a listener for OpenFlow connections

  If you want to enable SSL, pass private_key/certificate/ca_cert in
  reasonable combinations and pointing to reasonable key/cert files.
  These have the same meanings as with Open vSwitch's old test controller,
  but they are more flexible (e.g., ca-cert can be skipped).
  """
  if name is None:
    basename = "of_01"
    counter = 1
    name = basename
    while core.hasComponent(name):
      counter += 1
      name = "%s-%s" % (basename, counter)

  if core.hasComponent(name):
    log.warn("of_01 '%s' already started", name)
    return None

  global deferredSender
  if not deferredSender:
    deferredSender = DeferredSender()

  if of._logger is None:
    of._logger = core.getLogger('libopenflow_01')

  l = OpenFlow_01_Task(port = int(port), address = address,
                       ssl_key = private_key, ssl_cert = certificate,
                       ssl_ca_cert = ca_cert)
  core.register(name, l)
  return l
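# Hedged usage sketch (not upstream POX code): the SSL parameters documented
# in the launch() above are normally supplied on the POX command line, but the
# same function can also be called from another component.  The helper name
# and the key/cert file names below are placeholders, not files that ship
# with POX.
def launch_ssl_listener():
  return launch(port=6653, address="0.0.0.0",
                private_key="ctl-privkey.pem",   # placeholder path
                certificate="ctl-cert.pem",      # placeholder path
                ca_cert="cacert.pem")            # placeholder path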
def launch (default_arbiter=True):
  from pox.core import core
  if core.hasComponent("openflow"):
    return
  if default_arbiter:
    core.registerNew(OpenFlowConnectionArbiter)  # register a class
  core.register("openflow", OpenFlowNexus())  # register an instance of class 'OpenFlowNexus' as 'openflow'
def launch():
  # Connect to db
  tmp_db = Database()
  core.register("db", tmp_db)

  # Set all hosts to inactive
  for h in core.db.find_hosts({}):
    h['active'] = False
    core.db.update_host(h)

  # POX Lib
  core.openflow.addListenerByName("ConnectionUp", _handleConnectionUp)
  core.openflow.addListenerByName("ConnectionDown", _handleConnectionDown)
  core.openflow.addListenerByName("PacketIn", _handlePacketIn)
  core.openflow.addListenerByName("PortStatus", _handlePortStatus)

  # Overlord Lib
  core.devices = Devices()
  core.hosts = Hosts()
  core.links = Links()
  core.forwarding = Forwarding()
  core.forwarding.add_listener("new_flows", _handleNewFlows)
  core.hosts.add_listener("host_moved", _handleHostMoved)

  # Overlord Events
  web_events = WebMessage()
  web_events.addListenerByName("WebCommand", _handleWebCommand)
  t = Thread(target=web_events.run)
  t.setDaemon(True)
  t.start()
def launch (foo = False, bar = False):
  component = AlienComponent("demo1")
  core.register("alien", component)
  core.addListenerByName("UpEvent", _go_up)
def launch (*args, **kw):
  if core.hasComponent('of_01'):
    return None
  l = OpenFlow_01_Task(*args, **kw)
  #l = OpenFlow_01_Loop(*args, **kw)
  core.register("of_01", l)
  return l
def __init__ (self, fakeways = [], arp_for_unknowns = False):
  # These are "fake gateways" -- we'll answer ARPs for them with MAC
  # of the switch they're connected to.
  self.fakeways = set(fakeways)

  # If this is true and we see a packet for an unknown
  # host, we'll ARP for it.
  self.arp_for_unknowns = arp_for_unknowns

  # (IP,dpid) -> expire_time
  # We use this to keep from spamming ARPs
  self.outstanding_arps = {}

  # (IP,dpid) -> [(expire_time,buffer_id,in_port), ...]
  # These are buffers we've gotten at this datapath for this IP which
  # we can't deliver because we don't know where they go.
  self.lost_buffers = {}

  # For each switch, we map IP addresses to Entries
  self.arpTable = {}

  # This timer handles expiring stuff
  self._expire_timer = Timer(5, self._handle_expiration, recurring=True)

  core.register("learning_switch", self)
  self.listenTo(core)
def launch (address='', port=8000, debug=False, static=False):
  if debug:
    log.setLevel("DEBUG")
    log.debug("Debugging enabled")
  elif log.isEnabledFor("DEBUG"):
    log.setLevel("INFO")

  httpd = SplitThreadedServer((address, int(port)), SplitterRequestHandler)
  core.register("WebServer", httpd)
  httpd.set_handler("/", CoreHandler, httpd, True)
  #httpd.set_handler("/foo", StaticContentHandler, {'root':'.'}, True)
  #httpd.set_handler("/f", StaticContentHandler, {'root':'pox'}, True)
  #httpd.set_handler("/cgis", SplitCGIRequestHandler, "pox/web/www_root")
  if static:
    httpd.add_static_dir('static', 'www_root', relative=True)

  def run ():
    try:
      httpd.serve_forever()
    except:
      pass
    log.info("Server quit")

  thread = threading.Thread(target=run)
  thread.daemon = True
  thread.start()
def launch(**kwargs):
  """
  Launch and register a Stats instance
  """
  # register the component
  if core.hasComponent(NAME):
    return None

  unit = kwargs.get("UNIT_OF_VALUE", UNIT_OF_VALUE)
  comp = Stats(unit)
  core.register(NAME, comp)

  # attach handlers to listeners
  core.openflow.addListenerByName("FlowStatsReceived",
                                  comp._handle_FlowStatsReceived)
  core.openflow.addListenerByName("AggregateFlowStatsReceived",
                                  comp._handle_AggregateFlowStatsReceived)
  core.openflow.addListenerByName("TableStatsReceived",
                                  comp._handle_TableStatsReceived)
  core.openflow.addListenerByName("PortStatsReceived",
                                  comp._handle_PortStatsReceived)
  core.openflow.addListenerByName("QueueStatsReceived",
                                  comp._handle_QueueStatsReceived)
  core.openflow.addListenerByName("FlowRemoved",
                                  comp._handle_FlowRemoved)

  # timer set to execute every MONITOR_FLOW_PERIOD seconds
  period = kwargs.get("MONITOR_FLOW_PERIOD", MONITOR_FLOW_PERIOD)
  Timer(period, _timer_func, recurring=True)
  return comp
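# The Timer above drives _timer_func, which is not shown in this section.  As
# a rough sketch under that assumption (the real module may request a
# different set of statistics), a polling function typically walks the active
# OpenFlow connections and sends stats requests so the *Received handlers
# attached above have something to handle.  The name _example_stats_poll is
# hypothetical.
import pox.openflow.libopenflow_01 as of

def _example_stats_poll():
  # core.openflow.connections holds the live switch connections
  for connection in core.openflow.connections:
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))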
def __init__ (self, arp_for_unknowns = False):
  self.arp_for_unknowns = arp_for_unknowns
  self.message_queue = {}
  self.arpTable = {}
  """
  self.arpTable[1] = {}
  self.arpTable[2] = {}
  """
  self.ip_to_port = {}
  self.routingTable = {}

  # Create a routing table for each of the two switches.
  # Entry format: [destination IP, next-hop IP, interface, gateway IP, output port]
  self.routingTable[1] = [
    ['10.0.1.2', '10.0.1.2', 's1-eth1', '10.0.1.1', 1],
    ['10.0.1.3', '10.0.1.3', 's1-eth2', '10.0.1.1', 2],
    ['10.0.2.2', '10.0.2.1', 's1-eth3', '10.0.2.1', 3],
    ['10.0.2.3', '10.0.2.1', 's1-eth3', '10.0.2.1', 3],
    ['10.0.2.4', '10.0.2.1', 's1-eth3', '10.0.2.1', 3]]
  self.routingTable[2] = [
    ['10.0.2.2', '10.0.2.2', 's2-eth1', '10.0.2.1', 1],
    ['10.0.2.3', '10.0.2.3', 's2-eth2', '10.0.2.1', 2],
    ['10.0.2.4', '10.0.2.4', 's2-eth3', '10.0.2.1', 3],
    ['10.0.1.2', '10.0.1.1', 's2-eth4', '10.0.1.1', 4],
    ['10.0.1.3', '10.0.1.1', 's2-eth4', '10.0.1.1', 4]]

  """
  self.arpTable[1] = [
    ['10.0.1.1', '92:66:3c:5f:00:4f', 1],
    ['10.0.2.1', '26:22:39:c5:02:e9', 2],
    ['10.0.5.1', '82:2e:de:d7:db:a7', 3]]
  self.arpTable[1][IPAddr("10.0.1.1")] = EthAddr("92:66:3c:5f:00:4f")
  self.arpTable[1][IPAddr("10.0.2.1")] = EthAddr("26:22:39:c5:02:e9")
  self.arpTable[1][IPAddr("10.0.5.1")] = EthAddr("82:2e:de:d7:db:a7")
  self.arpTable[2][IPAddr("10.0.3.1")] = EthAddr("a6:5e:13:57:db:b4")
  self.arpTable[2][IPAddr("10.0.4.1")] = EthAddr("6e:d3:b4:f2:34:5e")
  self.arpTable[2][IPAddr("10.0.6.1")] = EthAddr("c6:c1:b9:f8:a9:77")
  """

  # Static ARP table.  Entry format: [IP, MAC, port]
  self.arpTable = [
    ['10.0.1.1', '92:66:3c:5f:00:4f', '1'],
    ['10.0.3.1', '26:22:39:c5:02:e9', '2'],
    ['10.0.3.2', '82:2e:de:d7:db:a7', '1'],
    ['10.0.2.1', 'a6:5e:13:57:db:b4', '2']]

  core.register("learning_switch", self)
  self.listenTo(core)
def launch (port = 6655, address = "0.0.0.0",interface='lo'): #print "\n".join(["%i. %s" % x for x in #enumerate(PCap.get_device_names())]) interfaces=PCap.get_device_names() #print interfaces if interface not in interfaces: log.info("input pcap interface error") raise RuntimeError("input pcap interface error") callback=Callback() core.register("callback", callback) of_fliter="ip proto \\tcp and tcp port 6633 and tcp[13] = 0x018" p = PCap(interface, promiscuous = False,callback = callback.cb, start=False,filter=of_fliter,period = 5) p.set_direction(True, True) p.use_select = False p.start() l = RecMes(port = int(port), address = address) core.register("RecMes", l) return l
def launch (port = POX_PORT, tf = b''):
  # Send full packets to controller
  core.openflow.miss_send_len = 0xffff

  # Start the Topo_controller component
  core.registerNew(Topo_controller, tf)

  #global SRC_DST_VLAN
  SRC_DST_VLAN_obj = MemoryDict(mem_time=1000)
  core.register("SRC_DST_VLAN", SRC_DST_VLAN_obj)

  # Start the MnIf component
  core.registerNew(MnIf)
  core.registerNew(PoxInternalIf)

  # Start the interface component
  global loop
  loop = RecocoIOLoop()
  #loop.more_debugging = True
  loop.start()
  w = ServerWorker(child_worker_type=NotifyWorker, port = int(port))
  loop.register_worker(w)
def launch(): "Start the OpenFlow manager and message receivers" ctrl = PoxManager(log, Config.DbName, Config.DbUser) mq = MsgQueueReceiver(Config.QueueId, ctrl) ctrl.registerReceiver(mq) rpc = RpcReceiver(Config.RpcHost, Config.RpcPort, ctrl) ctrl.registerReceiver(rpc) core.register("ravelcontroller", ctrl)
def launch (port = 6655, address = "0.0.0.0"): #mirrorstatse=MirrorStates() #core.register("mirrorstatse", mirrorstatse) l = SendMesTask(port = int(port), address = address) core.register("SendMesTask", l) return l
def launch(): """start trriger. called from nwgn.py. """ name = "openflow_topology" if not core.hasComponent(name): core.register(name, ScnOpenFlowTopology())
def launch():
  # discovery and arp_response are necessary components for routing
  if core.hasComponent('discovery') and core.hasComponent('arp_response'):
    component = Routing()
    core.register('routing', component)
    log.debug('ROUTING: Routing registered')
  else:
    log.error('ROUTING: Routing component *not* loaded. Required components missing')

def launch(**kwargs):
  if core.hasComponent(NAME):
    return None
  comp = VirtualNodeCreator()
  core.register(NAME, comp)
  return comp
def launch ():
  # Generate an explorer to handle the events
  me = MyExplorer()
  core.register("MyExplorer", me)
  core.Interactive.variables['ME'] = me
  pox.openflow.discovery.launch()
  pox.host_tracker.launch()
def launch(packetSize = 2000):
  if core.hasComponent('setPacketSize'):
    return None
  o = PacketSize(packetSize)
  core.openflow.addListenerByName("ConnectionUp", o.handle_Conn_Up)
  core.register('setPacketSize', o)
  return o

def launch():
  """
  Starts the component
  """
  def start_switch(event):
    log.debug("Controlling %s" % (event.connection,))
    Tutorial(event.connection)
  core.register('discovery', Discovery())
  core.openflow.addListenerByName("ConnectionUp", start_switch)
def softwareswitch (addr, port = 6633, max_retry_delay = 16, dpid = None,
                    __INSTANCE__ = None):
  """
  Launches a SoftwareSwitch

  Not particularly useful, since SoftwareSwitch doesn't do much.
  """
  from pox.core import core
  core.register("datapaths", {})
  do_launch(SoftwareSwitch, addr, port, max_retry_delay, dpid)
def launch(fileName):
  if core.hasComponent(NAME):
    return None
  comp = Parser()
  if not comp.parseFile(fileName):
    return
  core.register(NAME, comp)
  return comp
def _launch ():
  # Make invitable
  core.MessengerNexus.default_bot.add_bot(OFBot)

  # Just stick one in a channel
  OFBot("of_01")

  # For now, just register something arbitrary so that we can use
  # this for dependencies
  core.register(nexus + "_of_service", object())
def launch (no_flow = False, explicit_drop = True, link_timeout = None,
            eat_early_packets = False):
  explicit_drop = str_to_bool(explicit_drop)
  eat_early_packets = str_to_bool(eat_early_packets)
  install_flow = not str_to_bool(no_flow)
  if link_timeout:
    link_timeout = int(link_timeout)

  old_discovery = discovery.Discovery(explicit_drop=explicit_drop,
                                      install_flow=install_flow,
                                      link_timeout=link_timeout,
                                      eat_early_packets=eat_early_packets)
  core.register("slow_discovery", old_discovery)  # register as core.slow_discovery
  core.registerNew(FastDiscovery)  # register as core.openflow_discovery
def launch(**kwargs):
  if core.hasComponent(NAME):
    return None
  comp = FlowBw()
  core.register(NAME, comp)

  # attach handlers to listeners
  core.stats.addListenerByName("FlowStatsEv", comp._handle_FlowStatsEv)
  comp.listenTo(core.routing)
  return comp
def launch(alg="proactive", **outband_args): dependencies = [ "openflow.of_01", ["log.level", {"packet": WARN}], "samples.pretty_log", ["allegra.gen_routes_spf", {}, INFO], "allegra.gen_links_ring", ["allegra.outband", outband_args, INFO], ["allegra.get_dp_desc", {"type": "brief"}], "allegra.arp_responder", # because of proactive routing, no packets will # reach the controller, hence it is pointless to # differentiate between arpAware and arpSilent ["host_tracker", {"eat_packets": False, "install_flow": False, "arpAware": 30, "arpSilent": 30}, INFO], ["allegra.outband_host_tracker", {"timeout": 10}, INFO], ["allegra.topo_conf", {"topologies": None}], "topology", "openflow.discovery", "openflow.topology", "pox.messenger", "messenger.log_service", "allegra.linkutil", "allegra.tinytopo", "allegra.flowstat", "allegra.tinylink", "web", "messenger.ajax_transport", "openflow.of_service", "poxdesk", ["allegra.dns_responder", {"no_flow": True}, INFO], ["allegra.host_controller", {}, INFO], ] log_level_param = {} for mod in dependencies: if type(mod) == list and len(mod) == 3: log_level_param[mod[0]] = mod[2] for mod in dependencies: args = {} if type(mod) == list: mod, args = mod[:2] if mod == "log.level": args.update(log_level_param) mod = importlib.import_module(mod) mod.launch(**args) d = {"greedy": GreedyRouting, "predefined": PredefinedRouting, "proactive": ProactiveRouting, "None": None} a = d[alg] if a: core.register("routing", a()) core.call_when_ready(_start_planetlab, ["WebServer", "MessengerNexus_of_service"])
def launch (port = 2555, address = "0.0.0.0"):
  if core.hasComponent('peer_client') or core.hasComponent('peer_server'):
    return None

  peer_server = Broker_01_Task(port = int(port), address = address)
  core.register("peer_server", peer_server)

  peers = '192.168.109.228,192.168.109.229'
  peers = peers.split(',')
  log.info('need to connect peers: %s' % (peers))
  peer_client = Broker_Client_Task(port = int(port), address = peers)
  core.register("peer_client", peer_client)
def launch(**kwargs):
  if core.hasComponent(NAME):
    return None
  comp = BwFlowBalancing()
  core.register(NAME, comp)

  # timer set to execute every BWFLOWBALANCING_PERIOD seconds
  period = kwargs.get("BWFLOWBALANCING_PERIOD", BWFLOWBALANCING_PERIOD)
  Timer(period, comp.loop, recurring=True)
  return comp
def do_launch(cls, standalone, address='127.0.0.1', port=6633,
              max_retry_delay=16, dpid=None, extra_args=None, **kw):
  """
  Used for implementing custom switch launching functions

  cls is the class of the switch you want to add.

  Returns switch instance.
  """
  if extra_args is not None:
    import ast
    extra_args = ast.literal_eval('{%s}' % (extra_args,))
    kw.update(extra_args)

  from pox.core import core
  if not core.hasComponent('datapaths'):
    core.register("datapaths", {})
  _switches = core.datapaths

  if dpid is None:
    for dpid in range(1, 256):
      if dpid not in _switches:
        break
    if dpid in _switches:
      raise RuntimeError("Out of DPIDs")
  else:
    dpid = str_to_dpid(dpid)

  switch = cls(dpid=dpid, name="sw" + str(dpid), **kw)
  _switches[dpid] = switch

  port = int(port)
  max_retry_delay = int(max_retry_delay)

  def up(event):
    import pox.lib.ioworker
    global loop
    loop = pox.lib.ioworker.RecocoIOLoop()
    #loop.more_debugging = True
    loop.start()
    OpenFlowWorker.begin(loop=loop, addr=address, port=port,
                         max_retry_delay=max_retry_delay, switch=switch)

  from pox.core import core
  if not standalone:
    core.addListenerByName("UpEvent", up)

  return switch
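# Hedged sketch of a custom launcher built on do_launch(), following the same
# call pattern as softwareswitch() earlier in this section.  MySwitch is a
# placeholder for a SoftwareSwitch subclass defined elsewhere; nothing below
# is upstream POX code.
def myswitch (addr, port = 6633, max_retry_delay = 16, dpid = None,
              __INSTANCE__ = None):
  """
  Launches one MySwitch datapath that connects out to addr:port
  """
  from pox.core import core
  if not core.hasComponent('datapaths'):
    core.register("datapaths", {})
  return do_launch(MySwitch, addr, port, max_retry_delay, dpid)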
def __init__(self): core.register("Interactive", self) self.enabled = False self.completion = False self.history = False # import pox.license import sys self.variables = dict(locals()) self.variables['core'] = core self.variables['sync'] = False class pox_exit(object): def __call__(self, code=0): core.quit() sys.exit(code) def __repr__(self): return "Use exit() or Ctrl-D (i.e. EOF) to exit POX" self.variables['exit'] = pox_exit() self.running = False
def got_lease(event):
  outside_ip = event.lease.address
  if not event.lease.routers:
    log.error("Can't start NAT because we didn't get an upstream gateway")
    return
  gateway_ip = event.lease.routers[0]
  if event.lease.dns_servers:
    dns_ip = event.lease.dns_servers[0]
  else:
    dns_ip = None
  log.debug('Starting NAT')
  n = NAT(inside_ip, outside_ip, gateway_ip, dns_ip, outside_port, dpid,
          subnet=subnet)
  core.register(n)
def launch(link_weight_type='linear', static_link_weight=STATIC_LINK_WEIGHT,
           util_link_weight=UTILIZATION_LINK_WEIGHT,
           flow_replacement_mode='none',
           flow_replacement_interval=FLOW_REPLACEMENT_INTERVAL_SECONDS):
  # Method called by the POX core when launching the module
  link_weight_type_enum = LINK_WEIGHT_LINEAR  # Default
  if 'linear' in str(link_weight_type):
    link_weight_type_enum = LINK_WEIGHT_LINEAR
  elif 'exponential' in str(link_weight_type):
    link_weight_type_enum = LINK_WEIGHT_EXPONENTIAL

  flow_replacement_mode_int = NO_FLOW_REPLACEMENT
  if 'periodic' in str(flow_replacement_mode):
    flow_replacement_mode_int = PERIODIC_FLOW_REPLACEMENT
  if 'cong_threshold' in str(flow_replacement_mode):
    flow_replacement_mode_int = CONG_THRESHOLD_FLOW_REPLACEMENT

  groupflow_manager = GroupFlowManager(link_weight_type_enum,
                                       float(static_link_weight),
                                       float(util_link_weight),
                                       flow_replacement_mode_int,
                                       float(flow_replacement_interval))
  core.register('openflow_groupflow', groupflow_manager)
def launch():
  core.register(ModuleProjet889C())

def launch(debug="False"):
  if not core.hasComponent("topology_tracker"):
    core.register("topology_tracker", DynamicTopology(str_to_bool(debug)))

def launch (src_ip="9", src_router_dpid="00.00", dst_mcast_address="10",
            __INSTANCE__=None):
  install_rules = MulticastPath(src_ip, src_router_dpid, dst_mcast_address)
  core.register('MulticastPath', install_rules)
  core.addListenerByName("UpEvent", _go_up)
def launch():
  core.register('discovery', Discovery())
  log.info('Discovery registered')

def launch():
  controller = task_1_controller()
  core.register("controller", controller)

def launch ():
  if not core.hasComponent("openflow_topology"):
    core.register("openflow_topology", OpenFlowTopology())
def launch(address='', port=8000, static=False, ssl_server_key=None,
           ssl_server_cert=None, ssl_client_certs=None):
  def expand(f):
    if isinstance(f, str):
      return os.path.expanduser(f)
    return f

  ssl_server_key = expand(ssl_server_key)
  ssl_server_cert = expand(ssl_server_cert)
  ssl_client_certs = expand(ssl_client_certs)

  httpd = SplitThreadedServer((address, int(port)), SplitterRequestHandler,
                              ssl_server_key=ssl_server_key,
                              ssl_server_cert=ssl_server_cert,
                              ssl_client_certs=ssl_client_certs)
  core.register("WebServer", httpd)
  httpd.set_handler("/", CoreHandler, httpd, True)
  #httpd.set_handler("/foo", StaticContentHandler, {'root':'.'}, True)
  #httpd.set_handler("/f", StaticContentHandler, {'root':'pox'}, True)
  #httpd.set_handler("/cgis", SplitCGIRequestHandler, "pox/web/www_root")

  if static is True:
    httpd.add_static_dir('static', 'www_root', relative=True)
  elif static is False:
    pass
  else:
    static = static.split(",")
    for entry in static:
      if entry.lower() == "":
        httpd.add_static_dir('static', 'www_root', relative=True)
        continue
      if ':' not in entry:
        directory = entry
        prefix = os.path.split(directory)
        if prefix[1] == '':
          prefix = os.path.split(prefix[0])
        prefix = prefix[1]
        assert prefix != ''
      else:
        prefix, directory = entry.split(":")
      directory = os.path.expanduser(directory)
      httpd.add_static_dir(prefix, directory, relative=False)

  def run():
    try:
      msg = "https" if httpd.ssl_enabled else "http"
      msg += "://%s:%i" % httpd.socket.getsockname()
      log.info("Listening at " + msg)
      httpd.serve_forever()
    except:
      pass
    log.info("Server quit")

  def go_up(event):
    thread = threading.Thread(target=run)
    thread.daemon = True
    thread.start()

  def go_down(event):
    httpd.shutdown()

  core.addListenerByName("GoingUpEvent", go_up)
  core.addListenerByName("GoingDownEvent", go_down)
def launch():
  print("in my event!\n")
  core.register("jia_test", my_event())

def launch():
  core.register("gen_routes_spf", GenRoutesSpf())

def _launch(default_arbiter=True):
  from pox.core import core
  if default_arbiter:
    core.registerNew(OpenFlowConnectionArbiter)
  core.register("openflow", OpenFlowNexus())

def server(port=7791):
  c = create_server(int(port))
  core.register("ctld", c)
def launch(sg_file=None, config=None, gui=False, agent=False, rosapi=False,
           dovapi=False, full=False, loglevel="INFO", cfor=False, quit=False,
           visualization=False, mininet=None, test=False, log=None,
           log_folder=None):
  """
  Launch function called by POX core when core is up.

  :param sg_file: Path of the input Service graph (optional)
  :type sg_file: str
  :param config: additional config file with different name
  :type config: str
  :param gui: Signal for initiating the GUI (optional)
  :type gui: bool
  :param agent: Do not start the service layer (optional)
  :type agent: bool
  :param rosapi:
  :param full: Initiate the Infrastructure Layer as well
  :type full: bool
  :param loglevel: run on a specific log level (default: INFO)
  :type loglevel: str
  :param visualization: send NFFGs to remote visualization server (optional)
  :type visualization: bool
  :param mininet: Path of the initial topology graph (optional)
  :type mininet: str
  :param test: Start ESCAPE in test mode (optional)
  :type test: bool
  :param log: add ESCAPE main log file for test mode (default: log/escape.log)
  :type log: str
  :param quit: Quit after the first service request has been processed (optional)
  :type quit: bool
  :return: None
  """
  # Store args into this module file
  global init_param
  init_param.update(locals())

  __setup_pythonpath()
  __init_loggers(loglevel=loglevel, log=log, test=test, log_folder=log_folder)
  core_log.info("Load configuration....")
  __init_config(config=config, test=test, quit=quit)
  __print_header()

  if visualization:
    core_log.debug("Enable remote visualization...")
    from escape.util.com_logger import RemoteVisualizer
    core.register(RemoteVisualizer._core_name, RemoteVisualizer())

  # Register _start_components() to be called when POX is up
  core.addListenerByName("GoingUpEvent", _start_components)
def launch(idle_timeout=10):
  if not core.hasComponent("route_manager"):
    core.register("route_manager", ProactiveFlows(int(idle_timeout)))

def launch():
  core.register("routing", PredefinedRouting())

def launch ():
  core.register("gen_links_ring", GenLinksRing())

def launch ():
  controller = MyController()
  core.register("MyController", controller)

def launch ():
  if core.hasComponent("openflow"):
    return
  core.register("openflow", OpenFlowHub())

def launch():
  throttleManager = ThrottleManager()
  core.register(throttleManager)
def launch(port=6633, address="0.0.0.0"): if core.hasComponent('of_01'): return None l = OpenFlow_01_Task(port=int(port), address=address) core.register("of_01", l) return l
def launch(ports): ports = ports.replace(",", " ").split() l = Hub() core.register("hub", l) for p in ports: l.add_port(p)
def launch(default_arbiter=True): if core.hasComponent("openflow"): return if default_arbiter: core.registerNew(OpenFlowConnectionArbiter) core.register("openflow", OpenFlowNexus())
def _handle_ConnectionUp(event): #log.debug("Connection %s" % (event.connection,)) gw = EE122Gateway(event.connection, event.ofp.ports) core.register("gateway", gw)
def launch(): core.register("routing", ProactiveRouting())
def launch(no_flow=False,
           network="192.168.0.0/24",        # Address range
           first=1, last=None, count=None,  # Address range
           ip="192.168.0.254",
           router=(),                       # Auto
           dns=(),                          # Auto
           dpid=None,                       # All
           ports=None,                      # All
           __INSTANCE__=None):
  """
  Launch DHCP server

  Defaults to serving 192.168.0.1 to 192.168.0.253

  network  Subnet to allocate addresses from
  first    First'th address in subnet to use (256 is x.x.1.0 in a /16)
  last     Last'th address in subnet to use
  count    Alternate way to specify last address to use
  ip       IP to use for DHCP server
  router   Router IP to tell clients. Defaults to 'ip'. 'None' will stop
           the server from telling clients anything
  dns      DNS IP to tell clients. Defaults to 'router'. 'None' will stop
           the server from telling clients anything.
  """
  def fixint(i):
    i = str(i)
    if i.lower() == "none":
      return None
    if i.lower() == "true":
      return None
    return int(i)

  def fix(i):
    i = str(i)
    if i.lower() == "none":
      return None
    if i.lower() == "true":
      return None
    if i == '()':
      return ()
    return i

  first, last, count = map(fixint, (first, last, count))
  router, dns = map(fix, (router, dns))

  if ports is not None:
    ports = ports.split(",")
    ports = set(int(p) if p.isdigit() else p for p in ports)

  pool = SimpleAddressPool(network=network, first=first, last=last,
                           count=count)

  inst = DHCPD(install_flow=not no_flow, pool=pool, ip_address=ip,
               router_address=router, dns_address=dns, dpid=dpid,
               ports=ports)

  if __INSTANCE__[0] == 0:
    # First or only instance
    core.register(inst)

  log.debug("DHCP serving a%s", str(pool)[2:-1])
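# Hedged illustration (not part of the module above): constructing the address
# pool the same way launch() does, to see which range a given set of
# parameters would serve.  The helper name and the 10.0.0.0/24 values are made
# up for the example.
def _example_pool():
  pool = SimpleAddressPool(network="10.0.0.0/24", first=100, count=50)
  log.debug("Example DHCP pool: %s", pool)
  return pool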
def launch():
  controller = Task2_Controller()
  core.register("controller", controller)

def launch(network="192.168.0.0/24", dns=None):
  core.register('dhcp_server', DHCPDMulti(network, dns))

def launch():
  core.register("gen_links_fullmesh", GenLinksFullMesh())