def throttle(self):
    active_conns = cbpx_stats.c_endpoints/2
    throttle_step = 0.01
    l.debug("Throttle?: %i connections (%i limit)" % (active_conns, int(params.max_open_conns)))
    while active_conns >= int(params.max_open_conns):
        time.sleep(throttle_step)
        active_conns = cbpx_stats.c_endpoints/2
def __init__(self, port):
    Thread.__init__(self, name="Listener")
    l.debug("Setting up listener (backlog: %i)" % int(params.listen_backlog))
    self.sock = socket(AF_INET, SOCK_STREAM)
    self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    self.sock.bind(('', port))
    self.sock.listen(int(params.listen_backlog))
def __init__(self):
    # start gathering statistics
    try:
        l.debug("Proxy setting up stats")
        self.stats = cbpx_stats()
    except Exception, e:
        raise RuntimeError("Error setting up stats: %s" % str(e))
def sanitize(self, command):
    sanitized = command
    # allow only one-time stats
    if command.startswith("stats") and command != "stats":
        l.debug("Sanitizing command: '%s'" % command)
        sanitized = "stats"
    return sanitized
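# Illustrative behaviour of sanitize() above (an assumption drawn from the code,
# not part of the original sources): a periodic request such as "stats 5" is
# reduced to a one-shot "stats", any other command passes through unchanged.
#
#   sanitize("stats 5")  -> "stats"
#   sanitize("stats")    -> "stats"
#   sanitize("switch")   -> "switch"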
def process_connection(self, n_sock, n_addr):
    l.debug("Processing connection from: %s" % str(n_addr))
    try:
        fwd_sock = socket(AF_INET, SOCK_STREAM)
        fwd_sock.connect(relay.get_active())
    except IOError, (errno, strerror):
        l.error("Error establishing connection to backend: I/O error(%i): %s" % (errno, strerror))
        n_sock.close()
        return
def run(self):
    while not self.quit:
        l.debug("Waiting for stats timer...")
        self.fin.wait(cbpx_stats.sleep)
        #cbpx_stats.ticks += 1
        cbpx_stats.s_qc = (cbpx_stats.c_qc - cbpx_stats.l_qc) * (1/cbpx_stats.sleep)
        cbpx_stats.s_dqc = (cbpx_stats.c_dqc - cbpx_stats.l_dqc) * (1/cbpx_stats.sleep)
        cbpx_stats.l_qc = cbpx_stats.c_qc
        cbpx_stats.l_dqc = cbpx_stats.c_dqc
def __auth(self, api_url):
    """Auth to Opendaylight API"""
    auth_handler = urllib2.HTTPBasicAuthHandler()
    auth_handler.add_password(realm='opendaylight', uri=api_url, user=params.odl_user, passwd=params.odl_password)
    opener = urllib2.build_opener(auth_handler)
    urllib2.install_opener(opener)
    l.debug("Authorizing to OpenDaylight controller, url: %s" % api_url)
def __init__(self): l.debug("Starting readline user interface") self.__print_logo() try: readline.read_history_file(params.history_file) except IOError: file(params.history_file, "w") l.info("Create history file. File path: %s" % (params.history_file)) print " Ready for your commands, my master.\n"
def main(): l.info("Starting IP Rotator version %s" % (__version__)) odl = opendaylight() while True: ip_address = ip() for switch in params.switch: flow = { 'name_in': '%s_in_%s' % (params.flow_prefix, ip_address.replace('.','_')), 'name_out': '%s_out_%s' % (params.flow_prefix, ip_address.replace('.','_')), 'priority': params.flow_priority } current_flows = odl.get_flow(node={'id': switch}) #Check priority of current flows for name, value in current_flows.items(): if re.search(params.flow_prefix, name): if int(flow['priority']) == int(value['priority']): flow['priority'] = str(int(value['priority'])+1) l.debug("Priority for flows: %s" % (flow['priority'])) #Add new flows try: for port in params.flow_port: odl.add_flow(name=flow['name_in']+'_'+port, installInHw='true', node={"id":switch, "type": "OF"}, priority=flow['priority'], etherType='0x800', protocol='tcp', nwDst=ip_address, tpSrc=port, actions=['SET_NW_DST='+params.outgoing_ip, 'HW_PATH'] ) odl.add_flow(name=flow['name_out']+'_'+port, installInHw='true', node={"id":switch, "type": "OF"}, priority=flow['priority'], etherType='0x800', protocol='tcp', tpDst=port, tosBits=params.flow_tos_bits, actions=['SET_NW_SRC='+ip_address, 'HW_PATH'] ) finally: l.info("I've set IP %s" % (ip_address)) for name, value in current_flows.items(): if re.search(params.flow_prefix, name): odl.delete_flow(node={"id":switch}, name=name) time.sleep(params.rotate_time)
def __init__(self): l.debug("New transporter") Thread.__init__(self, name="Transport") self.EPOLL_EVENTS = select.EPOLLIN | select.EPOLLPRI | select.EPOLLERR | select.EPOLLHUP | select.EPOLLRDBAND self.DATA_READY = select.EPOLLIN | select.EPOLLPRI | select.EPOLLRDBAND self.CONN_STATE = select.EPOLLERR | select.EPOLLHUP self.poller = select.epoll() self.fd = {} self.quit = False self.dead = set() self.conn_lock = Lock()
def run(self): l.debug("Running listener") while True: l.debug("Awaiting new connection") try: # wait for new connection (n_sock, n_addr) = self.sock.accept() except Exception, e: l.error("Error accepting connection: " + str(e)) break l.debug("New connection from: %s" % str(n_addr)) # if there are more queued connections than allowed qc = conn_q.qsize() if qc >= int(params.max_queued_conns): l.warning("Queued %i connections, limit is %i" % (qc, int(params.max_queued_conns))) # if we were switching, than sorry, but not anymore l.info("Enabling relaying") relay.set("connection limit reached") try: conn_q.put([n_sock, n_addr], False) cbpx_stats.c_qc += 1 l.debug("Enqueued connection: %s" % str(n_addr)) except Full: l.error("Queue is full with %i elements!" % qc) l.info("Enabling relaying") relay.set("connection queue full") except Exception, e: l.warning("Exception during connection enqueue: %s" % str(e))
def get_switch(self):
    """Gets list of all switches"""
    result = []
    api_url = "%s/controller/nb/v2/switchmanager/default/nodes" % (params.odl_server)
    self.__auth(api_url)
    req = urllib2.Request(url=api_url)
    f = urllib2.urlopen(req)
    for node in json.loads(f.read())['nodeProperties']:
        result.append(node['node']['id'])
        l.debug("Added node: %s" % node['node']['id'])
    return result
def run(self):
    rd = []
    l.debug("Running transporter loop")
    while not self.quit:
        # wait for events on all tracked fds
        try:
            rd = self.poller.poll(0.2)
        except Exception, e:
            l.warning("Exception while poll(): %s" % str(e))
        # iterate over all events returned by epoll():
        for f, event in rd:
            # if data is waiting to be read
            if event & self.DATA_READY:
                # read the data
                data = ""
                try:
                    data = self.fd[f][0].recv(int(params.net_buffer_size))
                except Exception, e:
                    l.warning("Exception %s while reading data: %s" % (type(e), str(e)))
                    self.dead.add(f)
                    self.dead.add(self.fd[f][2])
                    continue
                # no data means connection closed
                if not data:
                    self.dead.add(f)
                    self.dead.add(self.fd[f][2])
                    continue
                else:
                    # pass the data to the other end
                    try:
                        # TODO: retransmission should be handled better
                        sent = self.fd[f][1].send(data)
                        if sent != len(data):
                            l.error("APOCALYPSE! Transmitted only %i bytes of received %i bytes" % (sent, len(data)))
                    except Exception, e:
                        l.warning("Exception %s while transmitting data: %s" % (type(e), str(e)))
                        self.dead.add(self.fd[f][2])
                        self.dead.add(f)
                        continue
def add(self, backend, client):
    fd_backend = backend.fileno()
    fd_client = client.fileno()
    l.debug("Adding fd: %i %i" % (fd_backend, fd_client))
    self.conn_lock.acquire()
    self.fd[fd_backend] = [backend, client, fd_client]
    self.fd[fd_client] = [client, backend, fd_backend]
    self.poller.register(fd_client, self.EPOLL_EVENTS)
    self.poller.register(fd_backend, self.EPOLL_EVENTS)
    cbpx_stats.c_endpoints = len(self.fd)
    self.conn_lock.release()
def remove(self, f):
    self.conn_lock.acquire()
    sock = self.fd[f][0]
    l.debug("Removing fd: %i" % f)
    self.poller.unregister(f)
    del self.fd[f]
    cbpx_stats.c_endpoints = len(self.fd)
    self.conn_lock.release()
    try:
        sock.shutdown(SHUT_RDWR)
        sock.close()
    except:
        pass
def add_flow(self, **kwargs):
    api_url = ("%s/controller/nb/v2/flowprogrammer/default/node/OF/%s/staticFlow/%s"
               % (params.odl_server, kwargs['node']['id'], kwargs['name']))
    self.__auth(api_url)
    request = urllib2.Request(api_url, data=json.dumps(kwargs))
    request.add_header('Content-Type', 'application/json')
    request.get_method = lambda: 'PUT'
    connection = urllib2.urlopen(request)
    l.debug("Flow:")
    for k, v in kwargs.items():
        l.debug("%s: %s" % (k, v))
    if connection.code != 200 and connection.code != 201:
        l.error("Error while adding flow, HTTP response: %s" % connection.code)
        raise Exception("Error while adding flow, HTTP response: %s" % connection.code)
    return connection.code
def cmd_set(self, args):
    # no arguments = print current settings
    if len(args) == 0:
        self.print_cfg()
        return
    # wrong number of arguments
    if len(args) != 2:
        self.ui.write("Use: 'set PARAMETER VALUE' to change setting")
        return
    # check if parameter is available in configuration
    if not hasattr(params, args[0]):
        self.ui.write(" No such parameter: %s" % args[0])
        return
    # check if parameter can be set
    if args[0] not in params.settable:
        self.ui.write(" Parameter is not settable: %s" % args[0])
        return
    # check if value type is correct
    try:
        v_test = params.settable[args[0]][0](args[1])
    except:
        self.ui.write(" VALUE for '%s' must be of %s" % (args[0], params.settable[args[0]][0]))
        return
    # check if value range is correct
    if v_test < params.settable[args[0]][1] or v_test > params.settable[args[0]][2]:
        self.ui.write(" %s must be between %s and %s" % (args[0], str(params.settable[args[0]][1]), str(params.settable[args[0]][2])))
        return
    # everything looks fine, set the parameter
    l.debug("Setting '%s' to '%s'" % (args[0], args[1]))
    try:
        params.__dict__[args[0]] = args[1]
    except Exception, e:
        self.ui.write(" Could not set parameter '%s' to '%s', error: %s" % (args[0], args[1], str(e)))
        l.warning("Could not set parameter '%s' to '%s', error: %s" % (args[0], args[1], str(e)))
        return
def read(self): l.debug("Awaiting TCP command connection...") (self.rc_conn, self.rc_addr) = self.rc_sock.accept() l.debug("TCP commnd connection from: %s" % str(self.rc_addr)) l.debug("Awaiting network command...") line = self.rc_conn.recv(int(params.net_buffer_size)) return self.sanitize(line)
def process_command(self):
    try:
        line = self.ui.read()
        if not line:
            return
        l.info("Got command: '%s'" % line)
        l_cmd = line.split(" ")[0]
        l_args = line.split(" ")[1:]
        if l_cmd and (l_cmd not in self.commands.keys()):
            self.ui.write(" Unknown command: '%s'" % l_cmd)
            self.ui.finish()
        else:
            res = self.commands[l_cmd][0](l_args)
            self.ui.finish()
            return res
    except KeyboardInterrupt:
        l.debug("Got KeyboardInterrupt, ignoring")
        self.ui.write("")
    except EOFError:
        self.ui.write("")
    except Exception, e:
        l.warning("Exception %s: %s" % (type(e), str(e)))
def get_flow(self, **kwargs):
    result = {}
    api_url = "%s/controller/nb/v2/flowprogrammer/default/node/OF/%s" % (params.odl_server, kwargs['node']['id'])
    self.__auth(api_url)
    request = urllib2.Request(api_url)
    request.add_header('Content-Type', 'application/json')
    connection = urllib2.urlopen(request)
    for k in json.loads(connection.read())['flowConfig']:
        result[k['name']] = k
    # Debug
    l.debug("Flows list:")
    for k, v in result.items():
        l.debug("Flow %s" % k)
        for flow_key, flow_value in v.items():
            l.debug("%s: %s" % (flow_key, flow_value))
        l.debug("")
    return result
def cmd_switch(self, args):
    active = relay.get_active()
    (ai, ap) = active
    (si, sp) = relay.get_standby()
    self.ui.write(" Starting switch: %s:%i -> %s:%i, timeout: %2.2f s, %i connections buffer\n" % (ai, ap, si, sp, float(params.switch_max_time), int(params.max_queued_conns)))
    l.info("Starting switch: %s:%i -> %s:%i, timeout: %2.2f s, %i connections buffer" % (ai, ap, si, sp, float(params.switch_max_time), int(params.max_queued_conns)))
    # stop relaying connections now
    relay.clear("switch started")
    # set the timer for max switch time
    switch_timer = SwitchTimer(float(params.switch_max_time))
    switch_timer.start()
    switch_start = time.time()
    # print initial stats
    self.print_stats(True, 0)
    l.debug("About to enter switch loop")
    while not relay.isSet():
        try:
            l.debug("Switch loop wait")
            threading.Event().wait(float(params.switch_loop_wait))
            # print stats
            waited = time.time() - switch_start
            self.print_stats(False, waited)
            l.debug("Switch active, waited: %2.2f" % waited)
        except Exception, e:
            l.info("Exception in 'switch' loop: %s" % str(e))
        except KeyboardInterrupt:
            l.warning("Ctrl-c in switch loop, break")
            self.ui.write(" Ctrl-c")
            relay.set("ctrl-c")
def kill_script():
    l.debug("Trying to kill switch finalize script (just in case)")
    global script
    try:
        script.kill()
    except:
        pass
def shutdown(self):
    l.debug("Shutting down TCP user interface")
    self.rc_sock.shutdown(SHUT_RDWR)
    self.rc_sock.close()
def __init__(self): l.debug("Starting TCP user interface on port %i" % int(params.rc_port)) self.rc_sock = socket(AF_INET, SOCK_STREAM) self.rc_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) self.rc_sock.bind(('', int(params.rc_port))) self.rc_sock.listen(5)
def close(self): l.debug("Proxy closing listener") try: self.listener.close() except: pass l.debug("Proxy joining listener") self.listener.join() l.debug("Proxy closing connectors") for c in self.connectors: try: c.close() except: pass l.debug("Proxy joining connectors") for c in self.connectors: c.join() l.debug("Proxy closing stats") try: self.stats.close() except: pass l.debug("Proxy joining stats") self.stats.join() l.debug("Proxy closing transporter") try: self.transporter.close() except: pass l.debug("Proxy killing connections (if any) on close()") self.transporter.kill_connections() l.debug("Proxy joining transporter") self.transporter.join()
def run(self): l.debug("Proxy starting stats") self.stats.start() l.debug("Proxy starting transporter") self.transporter.start() l.debug("Proxy starting connectors") for c in self.connectors: c.start() l.debug("Proxy starting listener") self.listener.start() l.debug("Proxy starting command interface") self.cmd.run() l.debug("Exiting proxy run() loop")
except Exception, e: raise RuntimeError("Error setting up stats: %s" % str(e)) # start transporter try: l.debug("Proxy setting up Transporter") self.transporter = cbpx_transporter() except Exception, e: raise RuntimeError("Error setting up transporter: %s" % str(e)) # start connector self.connectors = [] try: l.debug("Proxy setting up Connectors") # start 5 connectors to make sure connections are processed even if TCP connection to the backend # stalls due to lost syn/syn-ack/ack packet for i in range(0, 5): self.connectors.append(cbpx_connector(params.active_ip, params.active_port, params.standby_ip, params.standby_port, self.transporter)) except Exception, e: raise RuntimeError("Error setting up connector: %s" % str(e)) # start listener try: l.debug("Proxy setting up Listener") self.listener = cbpx_listener(params.port) except Exception, e: raise RuntimeError("Error setting up listener: %s" % str(e))
def close(self): l.debug("Closing stats") self.quit = True
def __init__(self): l.debug("Starting stats") Thread.__init__(self, name="Stats") self.quit = False self.fin = Event()
def remove_dead(self):
    l.debug("Dead sockets to remove: %s" % str(self.dead))
    for f in set(self.dead):
        try:
            self.remove(f)
        except:
            pass
        self.dead.discard(f)
relay.set("connection limit reached") try: conn_q.put([n_sock, n_addr], False) cbpx_stats.c_qc += 1 l.debug("Enqueued connection: %s" % str(n_addr)) except Full: l.error("Queue is full with %i elements!" % qc) l.info("Enabling relaying") relay.set("connection queue full") except Exception, e: l.warning("Exception during connection enqueue: %s" % str(e)) l.debug("Exiting listener loop") # ------------------------------------------------------------------------ class cbpx_connector(Thread): quit = 0 # -------------------------------------------------------------------- def __init__(self, ai, ap, si, sp, transporter): Thread.__init__(self, name="Connector") l.debug("Initializing connector") self.transporter = transporter relay.set_backends(ai, ap, si, sp) relay.set("connector started")
def run(self): l.debug("Running connector") while not cbpx_connector.quit: l.debug("Waiting until relay event is set") relay.wait() l.debug("Trying to get connection from queue...") try: # throttle if throttling enabled if int(params.max_open_conns) > 0: self.throttle() i = conn_q.get(True, 1) cbpx_stats.c_dqc += 1 l.debug("Dequeue connection: %s (%i in queue)" % (str(i[1]), conn_q.qsize())) self.process_connection(i[0], i[1]) conn_q.task_done() except Empty: l.debug("Connection queue empty") if cbpx_connector.quit: l.debug("Breaking connector loop on quit") break l.debug("Exiting connector loop")
def close(self): l.debug("Shutting down listener socket") self.sock.shutdown(SHUT_RDWR) self.sock.close() l.debug("Listener socket closed")
def close(self): l.debug("Closing transporter") self.quit = True