def build_graph(self):
    new_graph = DiGraph()
    # Rebuild the graph from the LSDB
    for lsa in chain(self.routers.values(),
                     self.networks.values(),
                     self.ext_networks.values()):
        lsa.apply(new_graph, self)
    # Contract all IPs to their respective router-id
    for lsa in self.routers.values():
        lsa.contract_graph(new_graph,
                           self.router_private_address.get(lsa.routerid, []))
    # Figure out the controllers layout
    base_net = ip_network(CFG.get(DEFAULTSECT, 'base_net'))
    controller_prefix = CFG.getint(DEFAULTSECT, 'controller_prefixlen')
    # Group by controller and log them
    for ip in new_graph.nodes_iter():
        addr = ip_address(ip)
        if addr in base_net:
            """1. Compute address diff to remove base_net
            2. Right shift to remove host bits
            3. Mask with controller mask"""
            id = (((int(addr) - int(base_net.network_address)) >>
                   (base_net.max_prefixlen - controller_prefix)) &
                  ((1 << controller_prefix) - 1))
            self.controllers[id].append(ip)
    # Contract them on the graph
    for id, ips in self.controllers.iteritems():
        contract_graph(new_graph, ips, 'C_%s' % id)
    # Remove generated self loops
    new_graph.remove_edges_from(new_graph.selfloop_edges())
    self.apply_secondary_addresses(new_graph)
    return new_graph
def __init__(self, instance_number):
    """
    :param instance_number: the controller instance number
    :param net: the subnet allocated for the fibbing nodes
    """
    self.leader = False
    self.instance = instance_number
    self.name = 'c%s' % instance_number
    self.nodes = {}
    self.bridge = Bridge('br0', self.name)
    self.root = None
    net = ip_network(CFG.get(DEFAULTSECT, 'base_net'))
    controller_prefix = CFG.getint(DEFAULTSECT, 'controller_prefixlen')
    host_prefix = net.max_prefixlen - controller_prefix
    controller_base = (int(net.network_address) +
                       (instance_number << host_prefix))
    controller_net = ip_address(controller_base)
    self.net = ip_network('%s/%s' % (controller_net, controller_prefix))
    self.graph_thread = Thread(target=self.infer_graph,
                               name="Graph inference thread")
    self.json_proxy = SJMPServer(hostname=CFG.get(DEFAULTSECT,
                                                  'json_hostname'),
                                 port=CFG.getint(DEFAULTSECT, 'json_port'),
                                 invoke=self.proxy_connected,
                                 target=FakeNodeProxyImplem(self))
    self.json_thread = Thread(target=self.json_proxy.communicate)
    # Used to assign unique router-id to each node
    self.next_id = 1
    self.links = []
    # The fibbing routes
    self.routes = {}
    self.route_mappings = {}
def __init__(self, instance_number):
    """
    :param instance_number: the controller instance number
    :param net: the subnet allocated for the fibbing nodes
    """
    self.leader = False
    self.instance = instance_number
    self.name = 'c%s' % instance_number
    self.nodes = {}
    self.bridge = Bridge('br0', self.name)
    self.root = None
    net = ip_network(CFG.get(DEFAULTSECT, 'base_net'))
    controller_prefix = CFG.getint(DEFAULTSECT, 'controller_prefixlen')
    host_prefix = net.max_prefixlen - controller_prefix
    controller_base = (int(net.network_address) +
                       (instance_number << host_prefix))
    controller_net = ip_address(controller_base)
    self.net = ip_network('%s/%s' % (controller_net, controller_prefix))
    self.graph_thread = daemon_thread(target=self.infer_graph,
                                      name="Graph inference thread")
    self.json_proxy = SJMPServer(hostname=CFG.get(DEFAULTSECT,
                                                  'json_hostname'),
                                 port=CFG.getint(DEFAULTSECT, 'json_port'),
                                 invoke=self.proxy_connected,
                                 target=FakeNodeProxyImplem(self))
    self.json_thread = daemon_thread(target=self.json_proxy.communicate,
                                     name="JSON proxy thread")
    # Used to assign unique router-id to each node
    self.next_id = 1
    self.links = []
    # The fibbing routes
    self.routes = {}
    self.route_mappings = {}
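# --- Illustrative sketch (not part of the original sources) ---
# Worked example of the per-controller subnet derivation used in the two
# constructors above. The base_net and controller_prefixlen values are
# assumptions chosen for illustration, not project defaults.
from ipaddress import ip_address, ip_network

net = ip_network(u'10.0.0.0/8')        # assumed base_net
controller_prefix = 16                 # assumed controller_prefixlen
instance_number = 3

host_prefix = net.max_prefixlen - controller_prefix            # 32 - 16 = 16
controller_base = int(net.network_address) + (instance_number << host_prefix)
controller_net = ip_address(controller_base)                   # 10.3.0.0
print(ip_network(u'%s/%s' % (controller_net, controller_prefix)))
# -> 10.3.0.0/16: the subnet reserved for controller instance 3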
def update_graph(self, new_graph):
    self.leader_watchdog.check_leader(self.get_leader())
    added_edges = new_graph.difference(self.graph)
    removed_edges = self.graph.difference(new_graph)
    node_prop_diff = {n: data
                      for n, data in new_graph.nodes_iter(data=True)
                      if n not in self.graph or
                      (data.viewitems() - self.graph.node[n].viewitems())}
    # Propagate differences
    if added_edges or removed_edges or node_prop_diff:
        log.debug('Pushing changes')
        for u, v in added_edges:
            self.for_all_listeners('add_edge', u, v,
                                   new_graph.export_edge_data(u, v))
        for u, v in removed_edges:
            self.for_all_listeners('remove_edge', u, v)
        if node_prop_diff:
            self.for_all_listeners('update_node_properties',
                                   **node_prop_diff)
        if CFG.getboolean(DEFAULTSECT, 'draw_graph'):
            new_graph.draw(CFG.get(DEFAULTSECT, 'graph_loc'))
        self.graph = new_graph
        log.info('LSA update yielded +%d -%d edges changes, '
                 '%d node property changes', len(added_edges),
                 len(removed_edges), len(node_prop_diff))
        self.for_all_listeners('commit')
def __init__(self):
    self.private_address_network = ip_network(CFG.get(DEFAULTSECT,
                                                      'private_net'))
    try:
        with open(CFG.get(DEFAULTSECT, 'private_ips'), 'r') as f:
            self.private_address_binding = json.load(f)
            self.router_private_address = {}
            for subnets in self.private_address_binding.itervalues():
                for rid, ip in subnets.iteritems():
                    try:
                        iplist = self.router_private_address[rid]
                    except KeyError:
                        iplist = self.router_private_address[rid] = []
                    iplist.append(ip)
    except Exception as e:
        log.error('Incorrect private IP addresses binding file')
        log.error(str(e))
        self.private_address_binding = {}
        self.router_private_address = {}
    self.last_line = ''
    self.transaction = None
    self.graph = DiGraph()
    self.routers = {}  # router-id : lsa
    self.networks = {}  # DR IP : lsa
    self.ext_networks = {}  # (router-id, dest) : lsa
    self.controllers = defaultdict(list)  # controller nr : ip_list
    self.listener = {}
    self.keep_running = True
    self.queue = Queue()
    self.processing_thread = Thread(target=self.process_lsa,
                                    name="lsa_processing_thread")
    self.processing_thread.start()
def launch_controller():
    CFG.read(cfg.C1_cfg)
    db = TopologyDB(db=cfg.DB_path)
    manager = SouthboundManager(optimizer=OSPFSimple())
    try:
        manager.run()
    except KeyboardInterrupt:
        manager.stop()
def __init__(self, *args, **kwargs):
    super(SouthboundListener, self).__init__(*args, **kwargs)
    self.igp_graph = IGPGraph()
    self.dirty = False
    self.json_proxy = SJMPClient(hostname=CFG.get(DEFAULTSECT,
                                                  'json_hostname'),
                                 port=CFG.getint(DEFAULTSECT, 'json_port'),
                                 target=self)
    self.quagga_manager = ProxyCloner(FakeNodeProxy, self.json_proxy)
def launch_controller():
    CFG.read(C1_cfg)
    db = TopologyDB(db=DB_path)
    manager = SouthboundManager(optimizer=OSPFSimple())
    manager.simple_path_requirement(db.subnet(R3, D1),
                                    [db.routerid(r) for r in (R1, R2, R3)])
    manager.simple_path_requirement(db.subnet(R3, D2),
                                    [db.routerid(r) for r in (R1, R4, R3)])
    try:
        manager.run()
    except KeyboardInterrupt:
        manager.stop()
def __init__(self, fwd_dags, optimizer, additional_routes=None):
    self.igp_graph = nx.DiGraph()
    self.dirty = False
    self.additional_routes = additional_routes
    self.optimizer = optimizer
    self.fwd_dags = fwd_dags
    self.current_lsas = set([])
    self.json_proxy = SJMPClient(hostname=CFG.get(DEFAULTSECT,
                                                  'json_hostname'),
                                 port=CFG.getint(DEFAULTSECT, 'json_port'),
                                 target=self)
    self.quagga_manager = ProxyCloner(FakeNodeProxy, self.json_proxy)
def __init__(self, node, port_name, port_ip):
    """
    :param node: The node owning this link
    :param port_name: The name of the only port visible on this link
    :param port_ip: The IPV4Address of that link
    """
    section = port_name if CFG.has_section(port_name) else 'physical'
    self.src = Port(node, self, port_name,
                    hello_int=CFG.get(section, 'hello_interval'),
                    dead_int=CFG.get(section, 'dead_interval'),
                    area=CFG.get(section, 'area'),
                    cost=CFG.get(section, 'cost'))
    self.src.move_in_namespace()
    self.src.set_ip(port_ip)
def __init__(self, node, port_name, port_ip):
    """
    :param node: The node owning this link
    :param port_name: The name of the only port visible on this link
    :param port_ip: The IPV4Address of that link
    """
    section = port_name if CFG.has_section(port_name) else 'physical'
    self.src = Port(node, self, port_name,
                    hello_int=CFG.get(section, 'hello_interval'),
                    dead_int=CFG.get(section, 'dead_interval'),
                    area=CFG.get(section, 'area'),
                    cost=CFG.get(section, 'cost'))
    self.src.move_in_namespace()
    self.src.set_ip(port_ip)
    self.node = node
    self.name = port_name
def launch_controller():
    CFG.read(C1_cfg)
    db = TopologyDB(db='/tmp/db.topo')
    manager = SouthboundManager(optimizer=OSPFSimple())
    prefix = db.subnet('r2', 'sw1')
    prefix_m = prefix.split('/')[0] + '/25'
    manager.simple_path_requirement(prefix_m,
                                    [db.routerid(r) for r in (R1, R3, R2)])
    import ipdb; ipdb.set_trace()
    try:
        manager.run()
    except KeyboardInterrupt:
        manager.stop()
def gen_physical_ports(port_list):
    """
    Find all enabled physical interfaces of this node
    :param port_list: The list of all physical ports that should be analyzed
    :return: A list of tuples (interface name, ip address)
             for each active physical interface
    """
    ports = []
    for port_name in port_list:
        try:
            ip = ip_interface(CFG.get(port_name, 'ip'))
            ports.append((port_name, ip))
        except ConfigError:
            try:
                out = subprocess.check_output(['ip', 'a', 'show', port_name])
                for line in out.splitlines():
                    if 'inet ' in line:
                        line = line.strip(' \t\n')
                        port_addr = ip_interface(line.split(' ')[1])
                        log.debug('Added physical port %s@%s',
                                  port_name, port_addr)
                        ports.append((port_name, port_addr))
                        break  # TODO: support multiple IPs per interface?
            except subprocess.CalledProcessError as e:
                log.exception(e)
    return ports
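# --- Illustrative sketch (not part of the original sources) ---
# Hypothetical call to gen_physical_ports(); the interface names below are
# placeholders. Interfaces without an 'ip' entry in CFG fall back to parsing
# the output of `ip a show <name>`.
ports = gen_physical_ports(['eth0', 'eth1'])
for name, addr in ports:
    print('%s -> %s' % (name, addr))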
def __init__(self, cfg=None, optimizer=OSPFSimple(),
             additional_routes=None, *args, **kwargs):
    if cfg:
        # read the CFG configuration
        CFG.read(pathtoREScfg)
        CFG.read(cfg)
    self.simple_req = {}
    # TODO: check if the prefix is already present before adding it
    self.change_pfx = []
    super(SouthBoundExtended, self).__init__(optimizer=optimizer,
                                             *args, **kwargs)
def set_up_ns_ID(self):
    """
    Update the starting ID of the NetworkNamespace class
    according to instance_nbr
    """
    instance_count = CFG.getint(DEFAULTSECT, 'controller_instance_number')
    NetworkNamespace.ID = NetworkNamespace.ID + 10 * instance_count
def __init__(self, node, link, id=None,
             cost=CFG.get('fake', 'cost'),
             dead_int=CFG.get('fake', 'dead_interval'),
             hello_int=CFG.get('fake', 'hello_interval'),
             area=CFG.get('fake', 'area')):
    """
    :param node: The node owning this port
    :param link: The link in which this port belongs
    :param id: The id of this port, otherwise infer it from the node's
               next available port number
    :param cost: The OSPF cost of that interface
    :param dead_int: The OSPF dead interval for that interface
    :param hello_int: The OSPF Hello interval
    :param area: The OSPF area of that interface
    """
    self.node = node
    self.link = link
    self.id = '%s-eth%s' % (node.id, node.get_next_port()) if not id else id
    self.ip_interface = None
    self.ospf_area = area
    self.ospf_cost = cost
    self.ospf_dead_int = dead_int
    self.ospf_hello_int = hello_int
def __init__(self):
    self.BASE_NET = ip_network(CFG.get(DEFAULTSECT, 'base_net'))
    self.private_addresses = PrivateAddressStore(CFG.get(DEFAULTSECT,
                                                         'private_ips'))
    self.last_line = ''
    self.leader_watchdog = None
    self.transaction = False
    self.uncommitted_changes = 0
    self.graph = IGPGraph()
    self._lsdb = {NetworkLSA.TYPE: {},
                  RouterLSA.TYPE: {},
                  ASExtLSA.TYPE: {}}
    self.controllers = defaultdict(list)  # controller nr : ip_list
    self.listener = {}
    self.keep_running = True
    self.queue = Queue()
    self.processing_thread = start_daemon_thread(
        target=self.process_lsa, name='lsa processing thread')
def apply(self, graph, lsdb):
    if ip_address(self.routerid) in lsdb.exclude_net and \
            CFG.getboolean(DEFAULTSECT, 'exclude_fake_lsa'):
        log.debug('Skipping AS-external Fake LSA %s via %s',
                  self.address,
                  [self.resolve_fwd_addr(r.fwd_addr) for r in self.routes])
        return
    for route in self.routes:
        graph.add_edge(self.resolve_fwd_addr(route.fwd_addr),
                       self.prefix,
                       metric=route.metric)
def __init__(self):
    self.BASE_NET = ip_network(CFG.get(DEFAULTSECT, 'base_net'))
    self.private_addresses = PrivateAddressStore(CFG.get(DEFAULTSECT,
                                                         'private_ips'))
    self.last_line = ''
    self.leader_watchdog = None
    self.transaction = None
    self.graph = IGPGraph()
    self.routers = {}  # router-id : lsa
    self.networks = {}  # DR IP : lsa
    self.ext_networks = {}  # (router-id, dest) : lsa
    self.controllers = defaultdict(list)  # controller nr : ip_list
    self.listener = {}
    self.keep_running = True
    self.queue = Queue()
    self.processing_thread = Thread(target=self.process_lsa,
                                    name="lsa_processing_thread")
    self.processing_thread.setDaemon(True)
    self.processing_thread.start()
def __init__(self, router, *args, **kwargs):
    super(MininetRouterConfig, self).__init__(router, *args, **kwargs)
    self.ospf.redistribute.connected = 1000
    self.ospf.redistribute.static = 1000
    self.ospf.router_id = router.id
    # Parse LSA throttling parameters
    delay = CFG.get("DEFAULT", 'delay')
    initial_holdtime = CFG.get("DEFAULT", 'initial_holdtime')
    max_holdtime = CFG.get("DEFAULT", 'max_holdtime')
    # Parse minimum LS intervals
    min_ls_interval = CFG.get("DEFAULT", 'min_ls_interval')
    min_ls_arrival = CFG.get("DEFAULT", 'min_ls_arrival')
    self.ospf.throttling = ConfigDict(
        spf=ConfigDict(delay=delay,
                       initial_holdtime=initial_holdtime,
                       max_holdtime=max_holdtime),
        lsa_all=ConfigDict(min_ls_interval=min_ls_interval))
    self.ospf.lsa = ConfigDict(min_ls_arrival=min_ls_arrival)
def __init__(self, address, metric, node):
    """
    :param address: The forwarding address to specify for this
                    attraction point
    :param metric: The metric of this attraction point
    :param node: The node advertizing this
    :return:
    """
    self._address = address
    self.metric = metric
    self.node = node
    self.advertized = False
    self.ttl = CFG.get(DEFAULTSECT, 'fake_lsa_ttl')
def write_CFG_ospf(config):
    # create new parser
    cfg = cparser.ConfigParser()
    # read the template
    cfg.read(template)
    conf = config['config']
    fibbing_ctrl = conf.get('fibbing-controller')
    controller_o_conf = fibbing_ctrl.get('controller-config').get('ospf')
    th_initial_holdtime = controller_o_conf.get('throttle').get('initial_holdtime')
    th_delay = controller_o_conf.get('throttle').get('delay')
    th_max_holdtime = controller_o_conf.get('throttle').get('max_holdtime')
    min_ls_arrival = controller_o_conf.get('lsa').get('min_ls_arrival')
    min_ls_interval = controller_o_conf.get('lsa').get('min_ls_interval')
    hello = controller_o_conf.get('hello-interval')
    dead = controller_o_conf.get('dead-interval')
    private_net = fibbing_ctrl.get('controller-config').get('private-ip-prefix')
    base_net = fibbing_ctrl.get('controller-config').get('base-net-perfix')
    cfg.set(cparser.DEFAULTSECT, 'area', str(controller_o_conf.get('area')))
    cfg.set(cparser.DEFAULTSECT, 'initial_holdtime', int(th_initial_holdtime))
    cfg.set(cparser.DEFAULTSECT, 'delay', int(th_delay))
    cfg.set(cparser.DEFAULTSECT, 'max_holdtime', int(th_max_holdtime))
    cfg.set(cparser.DEFAULTSECT, 'min_ls_interval', int(min_ls_interval))
    cfg.set(cparser.DEFAULTSECT, 'min_ls_arrival', int(min_ls_arrival))
    cfg.set(cparser.DEFAULTSECT, 'hello_interval', str(hello))
    cfg.set(cparser.DEFAULTSECT, 'dead_interval', str(dead))
    # set private IPs needed for the fibbing controller
    cfg.set(cparser.DEFAULTSECT, 'private_net', str(private_net))
    cfg.set(cparser.DEFAULTSECT, 'base_net', str(base_net))
    # overwrite the default.cfg file
    with open(pathtoREScfg, 'w') as f:
        cfg.write(f)
    # reload the configuration
    CFG.read(pathtoREScfg)
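# --- Illustrative sketch (not part of the original sources) ---
# Minimal shape of the nested `config` argument that write_CFG_ospf expects,
# inferred solely from the keys it reads above; every value is a placeholder.
sample_config = {
    'config': {
        'fibbing-controller': {
            'controller-config': {
                'ospf': {
                    'area': '0.0.0.0',
                    'throttle': {'delay': 0,
                                 'initial_holdtime': 0,
                                 'max_holdtime': 5000},
                    'lsa': {'min_ls_arrival': 0,
                            'min_ls_interval': 0},
                    'hello-interval': 1,
                    'dead-interval': 3,
                },
                'private-ip-prefix': '10.0.0.0/8',
                'base-net-perfix': '192.168.0.0/16',
            }
        }
    }
}
write_CFG_ospf(sample_config)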
def update_graph(self, new_graph):
    added_edges = graph_diff(new_graph, self.graph)
    removed_edges = graph_diff(self.graph, new_graph)
    # Propagate differences
    if len(added_edges) > 0 or len(removed_edges) > 0:
        log.debug('Pushing changes')
        for u, v in added_edges:
            self.listener_add_edge(u, v, new_graph[u][v]['metric'])
        for u, v in removed_edges:
            self.listener_remove_edge(u, v)
        if CFG.getboolean(DEFAULTSECT, 'draw_graph'):
            draw_graph(new_graph)
        self.graph = new_graph
        log.info('LSA update yielded +%d -%d edges changes'
                 % (len(added_edges), len(removed_edges)))
def draw_graph(graph):
    try:
        layout = spring_layout(graph)
        metrics = {(src, dst): data['metric']
                   for src, dst, data in graph.edges_iter(data=True)}
        draw_networkx_edge_labels(graph, layout, edge_labels=metrics)
        draw(graph, layout, node_size=20)
        draw_networkx_labels(graph, layout, labels={n: n for n in graph})
        output = CFG.get(DEFAULTSECT, 'graph_loc')
        if os.path.exists(output):
            os.unlink(output)
        plt.savefig(output)
        plt.close()
        log.debug('Graph of %d nodes saved in %s', len(graph), output)
    except:
        pass
def build_graph(self):
    self.controllers.clear()
    new_graph = IGPGraph()
    # Rebuild the graph from the LSDB
    for lsa in chain(self.routers.itervalues(),
                     self.networks.itervalues(),
                     self.ext_networks.itervalues()):
        if is_expired_lsa(lsa):
            log.debug("LSA %s is too old (%d) ignoring it!", lsa, lsa.age)
        else:
            lsa.apply(new_graph, self)
    # Contract all IPs to their respective router-id
    for rlsa in self.routers.itervalues():
        rlsa.contract_graph(new_graph,
                            self.private_addresses.addresses_of(
                                rlsa.routerid))
    # Figure out the controllers layout
    controller_prefix = CFG.getint(DEFAULTSECT, 'controller_prefixlen')
    # Group by controller and log them
    for ip in new_graph.nodes_iter():
        try:
            addr = ip_address(ip)
        except ValueError:
            continue  # Have a prefix
        if addr in self.BASE_NET:
            """1. Compute address diff to remove base_net
            2. Right shift to remove host bits
            3. Mask with controller mask"""
            cid = (((int(addr) - int(self.BASE_NET.network_address)) >>
                    (self.BASE_NET.max_prefixlen - controller_prefix)) &
                   ((1 << controller_prefix) - 1))
            self.controllers[cid].append(ip)
    # Contract them on the graph
    for id, ips in self.controllers.iteritems():
        cname = 'C_%s' % id
        new_graph.add_controller(cname)
        new_graph.contract(cname, ips)
    # Remove generated self loops
    new_graph.remove_edges_from(new_graph.selfloop_edges())
    self.apply_secondary_addresses(new_graph)
    return new_graph
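# --- Illustrative sketch (not part of the original sources) ---
# Worked example of the controller-id bit manipulation above, i.e. the reverse
# of the subnet derivation shown earlier. BASE_NET and controller_prefix are
# assumed values for illustration only.
from ipaddress import ip_address, ip_network

BASE_NET = ip_network(u'10.0.0.0/8')   # assumed base_net
controller_prefix = 16                 # assumed controller_prefixlen

addr = ip_address(u'10.3.0.5')         # an address inside a controller subnet
cid = (((int(addr) - int(BASE_NET.network_address)) >>
        (BASE_NET.max_prefixlen - controller_prefix)) &
       ((1 << controller_prefix) - 1))
print(cid)  # -> 3, so this address is contracted into node 'C_3'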
def start(self, phys_ports, nodecount=None):
    """
    Start the fibbing network
    :param phys_ports: The physical ports to attach to the root node
    :param nodecount: Pre-allocate nodecount fibbing nodes
    """
    # Create root node
    self.root = self.add_node(id='root', cls=RootRouter, start=False)
    self.root.lsdb.set_leader_watchdog(self)
    del self.nodes[self.root.id]  # The root node should not originate LSA
    self.graph_thread.start()
    self.json_thread.start()
    # And map all physical ports to it
    ports = gen_physical_ports(phys_ports)
    for name, addr in ports:
        link = PhysicalLink(self.root, name, addr)
        self.root.add_physical_link(link)
    self.root.start()
    # Create additional nodes if requested
    if nodecount is None:
        nodecount = CFG.getint(DEFAULTSECT, 'initial_node_count')
    while nodecount > 0:
        self.add_node()
        nodecount -= 1
class TestCLI(Cmd):

    Cmd.prompt = "> "

    def __init__(self, client, *args, **kwargs):
        Cmd.__init__(self, *args, **kwargs)
        self.client = client

    def do_add(self, line=""):
        self.client.add(("192.168.14.1", "192.168.23.2", 1, "3.3.3.0/24"))
        self.client.add((None, "192.168.23.2", 1, "4.4.4.0/24"))
        self.client.add([(None, "192.168.23.2", 1, "5.5.5.0/24"),
                         (None, "192.168.14.1", 1, "5.5.5.0/24")])

    def do_remove(self, line=""):
        self.client.remove(("192.168.14.1", "192.168.23.2", "3.3.3.0/24"))
        self.client.remove((None, "192.168.23.2", "4.4.4.0/24"))
        self.client.remove([(None, "192.168.23.2", "5.5.5.0/24"),
                            (None, "192.168.14.1", "5.5.5.0/24")])

    def do_exit(self, line):
        return True


if __name__ == "__main__":
    log.setLevel(logging.DEBUG)
    shapeshifter = ShapeshifterProxyTest()
    c = SJMPClient("localhost", CFG.getint(DEFAULTSECT, "json_port"),
                   target=shapeshifter)
    fakenode = ProxyCloner(FakeNodeProxy, c)
    Thread(target=c.communicate, name="client").start()
    TestCLI(fakenode).cmdloop()
    c.stop()
def __init__(self, congestionThreshold=0.95):
    """Reads the network topology from the MyGraphProvider, which runs in
    another thread because SouthboundManager.run() is blocking.

    Here we are assuming that the topology does not change.
    """
    # Dictionary that keeps the allocation of the flows in the network paths
    self.flow_allocation = {}
    # {prefixA: {flow1: [path_list], flow2: [path_list]},
    #  prefixB: {flow4: [path_list], flow3: [path_list]}}

    # Lock to make flow_allocation thread-safe
    self.flowAllocationLock = threading.Lock()

    # From where to read events
    self.eventQueue = eventQueue

    # Used to schedule flow allocation removals
    self.thread_handlers = {}

    # Data structure that holds the current forwarding DAGs for
    # all advertised destinations in the network
    self.dagsLock = threading.Lock()
    self.dags = {}

    # Set the congestion threshold
    self.congestionThreshold = congestionThreshold
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Congestion Threshold is set to %.2f%% of the link\n"
             % (t, self.congestionThreshold * 100.0))

    # Used to stop the thread
    self._stop = threading.Event()

    # Object that handles the topology database
    self.db = DatabaseHandler()

    # Connects to the southbound controller. Must be called before
    # creating an instance of SouthboundManager
    CFG.read(dconf.C1_Cfg)

    # Start the Southbound manager in a different thread
    self.sbmanager = MyGraphProvider()
    t = threading.Thread(target=self.sbmanager.run, name="Graph Listener")
    t.start()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Graph Listener thread started\n" % t)

    # Block until the initial-graph notification is received
    # from the southbound manager
    HAS_INITIAL_GRAPH.wait()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Initial graph received\n" % t)

    # Retrieve the network graph from the southbound manager
    self.network_graph = self.sbmanager.igp_graph

    # Maintains the list of network prefixes advertised by the OSPF routers
    self.ospf_prefixes = self._fillInitialOSPFPrefixes()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Initial OSPF prefixes read\n" % t)

    # Include BW data inside the initial graph
    n_router_links = self._countRouter2RouterEdges()
    self._readBwDataFromDB()
    i = 0
    while not self._bwInAllRouterEdges(n_router_links):
        i += 1
        time.sleep(1)
        self._readBwDataFromDB()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Bandwidths written in network_graph after %d iterations\n"
             % (t, i))

    # Keep a copy of the initial graph as the physical topology; the
    # instantaneous capacities of the links are kept in it.
    self.initial_graph = self.network_graph.copy()

    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Created IP-names bindings\n" % t)
    log.info("\tHostname\tip\tsubnet\n")
    for name, data in self.db.hosts_to_ip.iteritems():
        log.info("\t%s\t%s\t%s\n"
                 % (name, data['iface_host'], data['iface_router']))
    log.info("\tRouter name\tip\t\n")
    for name, ip in self.db.routers_to_ip.iteritems():
        log.info("\t%s\t%s\n" % (name, ip))

    # Create the initial DAGs for each destination in the network
    self._createInitialDags()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Initial DAGS created\n" % t)

    # Spawn the JSON listener thread
    jl = JsonListener(self.eventQueue)
    jl.start()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - Json listener thread created\n" % t)

    # Create the feedback attributes
    self.feedbackRequestQueue = feedbackRequestQueue
    self.feedbackResponseQueue = feedbackResponseQueue

    # Dict in which we save flows pending for allocation feedback
    self.pendingForFeedback = {}

    # Spawn the feedbackThread
    ft = feedbackThread(self.feedbackRequestQueue, self.feedbackResponseQueue)
    ft.start()
    t = time.strftime("%H:%M:%S", time.gmtime())
    log.info("%s - feedbackThread started\n" % t)
def __init__(self, client, *args, **kwargs):
    Cmd.__init__(self, *args, **kwargs)
    self.client = client

def do_add(self, line=''):
    self.client.add(('192.168.14.1', '192.168.23.2', 1, '3.3.3.0/24'))
    self.client.add((None, '192.168.23.2', 1, '4.4.4.0/24'))
    self.client.add([(None, '192.168.23.2', 1, '5.5.5.0/24'),
                     (None, '192.168.14.1', 1, '5.5.5.0/24')])

def do_remove(self, line=''):
    self.client.remove(('192.168.14.1', '192.168.23.2', '3.3.3.0/24'))
    self.client.remove((None, '192.168.23.2', '4.4.4.0/24'))
    self.client.remove([(None, '192.168.23.2', '5.5.5.0/24'),
                        (None, '192.168.14.1', '5.5.5.0/24')])

def do_exit(self, line):
    return True


if __name__ == '__main__':
    log.setLevel(logging.DEBUG)
    shapeshifter = ShapeshifterProxyTest()
    c = SJMPClient("localhost", CFG.getint(DEFAULTSECT, "json_port"),
                   target=shapeshifter)
    fakenode = ProxyCloner(FakeNodeProxy, c)
    Thread(target=c.communicate, name='client').start()
    TestCLI(fakenode).cmdloop()
    c.stop()