class Reactor(object):
    class Exit(object):
        normal = 0
        validate = 0
        listening = 1
        configuration = 1
        privileges = 1
        log = 1
        pid = 1
        socket = 1
        io_error = 1
        process = 1
        select = 1
        unknown = 1

    # [hex(ord(c)) for c in os.popen('clear').read()]
    clear = concat_bytes_i(
        character(int(c, 16))
        for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a'])

    def __init__(self, configurations):
        self._ips = environment.settings().tcp.bind
        self._port = environment.settings().tcp.port
        self._stopping = environment.settings().tcp.once
        self.exit_code = self.Exit.unknown

        self.max_loop_time = environment.settings().reactor.speed
        self._sleep_time = self.max_loop_time / 100
        self._busyspin = {}
        self._ratelimit = {}
        self.early_drop = environment.settings().daemon.drop

        self.processes = None

        self.configuration = Configuration(configurations)
        self.logger = Logger()
        self.asynchronous = ASYNC()
        self.signal = Signal()
        self.daemon = Daemon(self)
        self.listener = Listener(self)
        self.api = API(self)

        self._peers = {}

        self._reload_processes = False
        self._saved_pid = False
        self._poller = select.poll()

    def _termination(self, reason, exit_code):
        self.exit_code = exit_code
        self.signal.received = Signal.SHUTDOWN
        self.logger.critical(reason, 'reactor')

    def _prevent_spin(self):
        second = int(time.time())
        if second not in self._busyspin:
            self._busyspin = {second: 0}
        self._busyspin[second] += 1
        if self._busyspin[second] > self.max_loop_time:
            time.sleep(self._sleep_time)
            return True
        return False

    def _rate_limited(self, peer, rate):
        if rate <= 0:
            return False
        second = int(time.time())
        ratelimit = self._ratelimit.get(peer, {})
        if second not in ratelimit:
            self._ratelimit[peer] = {second: rate - 1}
            return False
        if self._ratelimit[peer][second] > 0:
            self._ratelimit[peer][second] -= 1
            return False
        return True

    def _wait_for_io(self, sleeptime):
        spin_prevention = False
        try:
            for fd, event in self._poller.poll(sleeptime):
                if event & select.POLLIN or event & select.POLLPRI:
                    yield fd
                    continue
                elif event & select.POLLHUP or event & select.POLLERR or event & select.POLLNVAL:
                    spin_prevention = True
                    continue
            if spin_prevention:
                self._prevent_spin()
        except KeyboardInterrupt:
            self._termination('^C received', self.Exit.normal)
            return
        except Exception:
            self._prevent_spin()
            return

    # peer related functions

    def active_peers(self):
        peers = set()
        for key, peer in self._peers.items():
            if not peer.neighbor.passive or peer.proto:
                peers.add(key)
        return peers

    def established_peers(self):
        peers = set()
        for key, peer in self._peers.items():
            if peer.fsm == FSM.ESTABLISHED:
                peers.add(key)
        return peers

    def peers(self):
        return list(self._peers)

    def handle_connection(self, peer_name, connection):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.handle_connection(connection)

    def neighbor(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        return peer.neighbor

    def neighbor_name(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return ""
        return peer.neighbor.name()

    def neighbor_ip(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return ""
        return str(peer.neighbor.peer_address)

    def neighbor_cli_data(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return ""
        return peer.cli_data()

    def neighor_rib(self, peer_name, rib_name, advertised=False):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return []
        families = None
        if advertised:
            families = peer.proto.negotiated.families if peer.proto else []
        rib = peer.neighbor.rib.outgoing if rib_name == 'out' else peer.neighbor.rib.incoming
        return list(rib.cached_changes(families))

    def neighbor_rib_resend(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.neighbor.rib.outgoing.resend(None, peer.neighbor.route_refresh)

    def neighbor_rib_out_withdraw(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.neighbor.rib.outgoing.withdraw(None, peer.neighbor.route_refresh)

    def neighbor_rib_in_clear(self, peer_name):
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.neighbor.rib.incoming.clear()

    # ...

    def _completed(self, peers):
        for peer in peers:
            if self._peers[peer].neighbor.rib.outgoing.pending():
                return False
        return True

    def run(self, validate, root):
        self.daemon.daemonise()

        # Make sure we create processes once we have closed file descriptor
        # unfortunately, this must be done before reading the configuration file
        # so we can not do it with dropped privileges
        self.processes = Processes()

        # we have to read the configuration possibly with root privileges
        # as we need the MD5 information when we bind, and root is needed
        # to bind to a port < 1024

        # this is undesirable as:
        # - handling user generated data as root should be avoided
        # - we may not be able to reload the configuration once the privileges are dropped

        # but I can not see any way to avoid it
        for ip in self._ips:
            if not self.listener.listen_on(ip, None, self._port, None, False, None):
                return self.Exit.listening

        if not self.load():
            return self.Exit.configuration

        if validate:  # only validate configuration
            self.logger.warning('', 'configuration')
            self.logger.warning('parsed Neighbors, un-templated', 'configuration')
            self.logger.warning('------------------------------', 'configuration')
            self.logger.warning('', 'configuration')
            for key in self._peers:
                self.logger.warning(str(self._peers[key].neighbor), 'configuration')
                self.logger.warning('', 'configuration')
            return self.Exit.validate

        for neighbor in self.configuration.neighbors.values():
            if neighbor.listen:
                if not self.listener.listen_on(
                        neighbor.md5_ip, neighbor.peer_address, neighbor.listen,
                        neighbor.md5_password, neighbor.md5_base64, neighbor.ttl_in):
                    return self.Exit.listening

        if not self.early_drop:
            self.processes.start(self.configuration.processes)

        if not self.daemon.drop_privileges():
            self.logger.critical(
                'could not drop privileges to \'%s\', refusing to run as root' % self.daemon.user,
                'reactor')
            self.logger.critical(
                'set the environment value exabgp.daemon.user to change the unprivileged user',
                'reactor')
            return self.Exit.privileges

        if self.early_drop:
            self.processes.start(self.configuration.processes)

        # This is required to make sure we can write in the log location
        # as we now have dropped root privileges
        if not self.logger.restart():
            self.logger.critical('could not setup the logger, aborting', 'reactor')
            return self.Exit.log

        if not self.daemon.savepid():
            return self.Exit.pid

        # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2?
        reload_completed = False

        wait = environment.settings().tcp.delay
        if wait:
            sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
            self.logger.debug('waiting for %d seconds before connecting' % sleeptime, 'reactor')
            time.sleep(float(sleeptime))

        workers = {}
        peers = set()
        api_fds = []
        ms_sleep = int(self._sleep_time * 1000)

        while True:
            try:
                if self.signal.received:
                    for key in self._peers:
                        if self._peers[key].neighbor.api['signal']:
                            self._peers[key].reactor.processes.signal(
                                self._peers[key].neighbor, self.signal.number)

                    signaled = self.signal.received
                    self.signal.rearm()

                    if signaled == Signal.SHUTDOWN:
                        self.exit_code = self.Exit.normal
                        self.shutdown()
                        break

                    if signaled == Signal.RESTART:
                        self.restart()
                        continue

                    if not reload_completed:
                        continue

                    if signaled == Signal.FULL_RELOAD:
                        self._reload_processes = True

                    if signaled in (Signal.RELOAD, Signal.FULL_RELOAD):
                        self.load()
                        self.processes.start(self.configuration.processes, self._reload_processes)
                        self._reload_processes = False
                        continue

                if self.listener.incoming():
                    # check all incoming connections
                    self.asynchronous.schedule(
                        str(uuid.uuid1()), 'checking for new connection(s)',
                        self.listener.new_connections())

                peers = self.active_peers()
                if self._completed(peers):
                    reload_completed = True

                sleep = ms_sleep

                # do not attempt to listen on closed sockets even if the peer is still here
                for io in list(workers.keys()):
                    if io == -1:
                        self._poller.unregister(io)
                        del workers[io]

                # give a turn to all the peers
                for key in list(peers):
                    peer = self._peers[key]

                    # limit the number of messages handled per second
                    if self._rate_limited(key, peer.neighbor.rate_limit):
                        peers.discard(key)
                        continue

                    # handle the peer
                    action = peer.run()

                    # .run() returns an ACTION enum:
                    # * immediate if it wants to be called again
                    # * later if it should be called again but has no work atm
                    # * close if it is finished and is closing down, or restarting
                    if action == ACTION.CLOSE:
                        if key in self._peers:
                            del self._peers[key]
                        peers.discard(key)
                    # we are losing this peer, no point scheduling more process work
                    elif action == ACTION.LATER:
                        io = peer.socket()
                        if io != -1:
                            self._poller.register(
                                io,
                                select.POLLIN | select.POLLPRI | select.POLLHUP
                                | select.POLLNVAL | select.POLLERR)
                            workers[io] = key
                        # no need to come back to it before a full cycle
                        peers.discard(key)
                    elif action == ACTION.NOW:
                        sleep = 0

                    if not peers:
                        break

                # read at least one message per process, if any, and parse it
                for service, command in self.processes.received():
                    self.api.text(self, service, command)
                    sleep = 0

                self.asynchronous.run()

                if api_fds != self.processes.fds:
                    for fd in api_fds:
                        if fd == -1:
                            continue
                        if fd not in self.processes.fds:
                            self._poller.unregister(fd)
                    for fd in self.processes.fds:
                        if fd == -1:
                            continue
                        if fd not in api_fds:
                            self._poller.register(
                                fd,
                                select.POLLIN | select.POLLPRI | select.POLLHUP
                                | select.POLLNVAL | select.POLLERR)
                    api_fds = self.processes.fds

                for io in self._wait_for_io(sleep):
                    if io not in api_fds:
                        peers.add(workers[io])

                if self._stopping and not self._peers.keys():
                    self._termination('exiting on peer termination', self.Exit.normal)

            except KeyboardInterrupt:
                self._termination('^C received', self.Exit.normal)
            except SystemExit:
                self._termination('exiting', self.Exit.normal)
            # socket.error is a subclass of IOError (so catch it first)
            except socket.error:
                self._termination('socket error received', self.Exit.socket)
            except IOError:
                self._termination('I/O Error received, most likely ^C during IO', self.Exit.io_error)
            except ProcessError:
                self._termination('Problem when sending message(s) to helper program, stopping', self.Exit.process)
            except select.error:
                self._termination('problem using select, stopping', self.Exit.select)

        return self.exit_code

    def register_peer(self, name, peer):
        self._peers[name] = peer

    def teardown_peer(self, name, code):
        self._peers[name].teardown(code)

    def shutdown(self):
        """Terminate all the current BGP connections"""
        self.logger.critical('performing shutdown', 'reactor')
        if self.listener:
            self.listener.stop()
            self.listener = None
        for key in self._peers.keys():
            self._peers[key].shutdown()
        self.asynchronous.clear()
        self.processes.terminate()
        self.daemon.removepid()
        self._stopping = True

    def load(self):
        """Reload the configuration and send to the peer the route which changed"""
        self.logger.notice('performing reload of exabgp %s' % version, 'configuration')

        reloaded = self.configuration.reload()

        if not reloaded:
            #
            # Careful: the string below is used by the QA code to check for success or failure
            self.logger.error('not reloaded, no change found in the configuration', 'configuration')
            # Careful: the string above is used by the QA code to check for success or failure
            #
            self.logger.error(str(self.configuration.error), 'configuration')
            return False

        for key, peer in self._peers.items():
            if key not in self.configuration.neighbors:
                self.logger.debug('removing peer: %s' % peer.neighbor.name(), 'reactor')
                peer.remove()

        for key, neighbor in self.configuration.neighbors.items():
            # new peer
            if key not in self._peers:
                self.logger.debug('new peer: %s' % neighbor.name(), 'reactor')
                peer = Peer(neighbor, self)
                self._peers[key] = peer
            # modified peer
            elif self._peers[key].neighbor != neighbor:
                self.logger.debug(
                    'peer definition change, establishing a new connection for %s' % str(key),
                    'reactor')
                self._peers[key].reestablish(neighbor)
            # same peer but perhaps not the routes
            else:
                # finding what route changed and sending the delta is not obvious
                self.logger.debug(
                    'peer definition identical, updating peer routes if required for %s' % str(key),
                    'reactor')
                self._peers[key].reconfigure(neighbor)
            for ip in self._ips:
                if ip.afi == neighbor.peer_address.afi:
                    self.listener.listen_on(
                        ip, neighbor.peer_address, self._port,
                        neighbor.md5_password, neighbor.md5_base64, None)
        self.logger.notice('loaded new configuration successfully', 'reactor')

        return True

    def restart(self):
        """Kill the BGP session and restart it"""
        self.logger.notice('performing restart of exabgp %s' % version, 'reactor')

        # XXX: FIXME: Could return False, in case there is interference with old config...
        reloaded = self.configuration.reload()

        for key in self._peers.keys():
            if key not in self.configuration.neighbors.keys():
                peer = self._peers[key]
                self.logger.debug('removing peer %s' % peer.neighbor.name(), 'reactor')
                self._peers[key].remove()
            else:
                self._peers[key].reestablish()
        self.processes.start(self.configuration.processes, True)
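# Illustrative sketch (not part of ExaBGP's API): the per-second accounting
# that Reactor._rate_limited above implements, shown standalone so it can be
# exercised without a Reactor. The function name, the `buckets` dict and the
# explicit `now` parameter are hypothetical stand-ins.
def _example_rate_limited(buckets, peer, rate, now):
    """Return True once `peer` has spent its `rate` tokens for this second."""
    if rate <= 0:
        return False
    second = int(now)
    if second not in buckets.get(peer, {}):
        buckets[peer] = {second: rate - 1}  # new second: reset the bucket
        return False
    if buckets[peer][second] > 0:
        buckets[peer][second] -= 1  # spend one token
        return False
    return True  # bucket empty: rate limited


# With rate=2, the third call inside the same second is refused:
#   buckets = {}
#   [_example_rate_limited(buckets, 'peer-1', 2, 1700000000.0) for _ in range(3)]
#   -> [False, False, True]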
def run(env, comment, configurations, root, validate, pid=0):
    logger = Logger()

    logger.notice('Thank you for using ExaBGP', 'welcome')
    logger.notice('%s' % version, 'version')
    logger.notice('%s' % sys.version.replace('\n', ' '), 'interpreter')
    logger.notice('%s' % ' '.join(platform.uname()[:5]), 'os')
    logger.notice('%s' % root, 'installation')

    if comment:
        logger.notice(comment, 'advice')

    warning = warn()
    if warning:
        logger.warning(warning, 'advice')

    if env.api.cli:
        pipename = 'exabgp' if env.api.pipename is None else env.api.pipename
        pipes = named_pipe(root, pipename)
        if len(pipes) != 1:
            env.api.cli = False
            logger.error(
                'could not find the named pipes (%s.in and %s.out) required for the cli' % (pipename, pipename),
                'cli')
            logger.error('we scanned the following folders (the number is your PID):', 'cli')
            for location in pipes:
                logger.error(' - %s' % location, 'cli control')
            logger.error('please make them in one of the folders with the following commands:', 'cli control')
            logger.error('> mkfifo %s/run/%s.{in,out}' % (os.getcwd(), pipename), 'cli control')
            logger.error('> chmod 600 %s/run/%s.{in,out}' % (os.getcwd(), pipename), 'cli control')
            if os.getuid() != 0:
                logger.error(
                    '> chown %d:%d %s/run/%s.{in,out}' % (os.getuid(), os.getgid(), os.getcwd(), pipename),
                    'cli control')
        else:
            pipe = pipes[0]
            os.environ['exabgp_cli_pipe'] = pipe
            os.environ['exabgp_api_pipename'] = pipename

            logger.info('named pipes for the cli are:', 'cli control')
            logger.info('to send commands  %s%s.in' % (pipe, pipename), 'cli control')
            logger.info('to read responses %s%s.out' % (pipe, pipename), 'cli control')

    if not env.profile.enable:
        exit_code = Reactor(configurations).run(validate, root)
        __exit(env.debug.memory, exit_code)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if env.profile.file == 'stdout':
        profiled = 'Reactor(%s).run(%s,"%s")' % (str(configurations), str(validate), str(root))
        exit_code = profile.run(profiled)
        __exit(env.debug.memory, exit_code)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is not a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        cwd = os.getcwd()
        logger.debug('profiling ....', 'reactor')
        profiler = profile.Profile()
        profiler.enable()
        try:
            exit_code = Reactor(configurations).run(validate, root)
        except Exception:
            exit_code = Reactor.Exit.unknown
            raise
        finally:
            profiler.disable()
            kprofile = lsprofcalltree.KCacheGrind(profiler)
            try:
                destination = profile_name if profile_name.startswith('/') else os.path.join(cwd, profile_name)
                with open(destination, 'w+') as write:
                    kprofile.output(write)
            except IOError:
                notice = 'could not save profiling information at: ' + destination
                logger.debug("-" * len(notice), 'reactor')
                logger.debug(notice, 'reactor')
                logger.debug("-" * len(notice), 'reactor')
        __exit(env.debug.memory, exit_code)
    else:
        logger.debug("-" * len(notice), 'reactor')
        logger.debug(notice, 'reactor')
        logger.debug("-" * len(notice), 'reactor')
        Reactor(configurations).run(validate, root)
        __exit(env.debug.memory, 1)
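# Illustrative sketch (hypothetical helper, not ExaBGP's API): the
# profile-wrap-and-save pattern used by run() above, reduced to its core.
# run() writes KCacheGrind data via the third-party lsprofcalltree module;
# this sketch uses the standard library pstats dump instead so that it
# stays self-contained.
def _example_profiled_run(workload, destination):
    """Run `workload()` under cProfile and save the stats to `destination`."""
    import cProfile
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        return workload()
    finally:
        profiler.disable()
        profiler.dump_stats(destination)  # readable later with pstats.Stats


# e.g. _example_profiled_run(lambda: sum(range(10000)), '/tmp/example.pstats')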
class API(Command):
    def __init__(self, reactor):
        self.reactor = reactor
        self.logger = Logger()
        self.configuration = Configuration([])

    def log_message(self, message, level='INFO'):
        self.logger.notice(message, 'api', level)

    def log_failure(self, message, level='ERR'):
        error = str(self.configuration.tokeniser.error)
        report = '%s\nreason: %s' % (message, error) if error else message
        self.logger.error(report, 'api', level)

    def text(self, reactor, service, command):
        for registered in self.functions:
            if registered == command or registered + ' ' in command:
                return self.callback['text'][registered](self, reactor, service, command)
        reactor.processes.answer(service, Answer.error)
        self.logger.warning('command from process not understood: %s' % command, 'api')
        return False

    def api_route(self, command):
        action, line = command.split(' ', 1)

        self.configuration.static.clear()
        if not self.configuration.partial('static', line):
            return []

        if self.configuration.scope.location():
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop_routes()
        return changes

    def api_flow(self, command):
        action, flow, line = command.split(' ', 2)

        self.configuration.flow.clear()
        if not self.configuration.partial('flow', line):
            return []

        if self.configuration.scope.location():
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop_routes()
        return changes

    def api_vpls(self, command):
        action, line = command.split(' ', 1)

        self.configuration.l2vpn.clear()
        if not self.configuration.partial('l2vpn', line):
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop('l2vpn')
        return changes

    def api_attributes(self, command, peers):
        action, line = command.split(' ', 1)

        self.configuration.static.clear()
        if not self.configuration.partial('static', line):
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop_routes()
        return changes

    def api_refresh(self, command):
        tokens = formated(command).split(' ')[2:]
        if len(tokens) != 2:
            return False
        afi = AFI.value(tokens.pop(0))
        safi = SAFI.value(tokens.pop(0))
        if afi is None or safi is None:
            return False
        return [RouteRefresh(afi, safi)]

    def api_eor(self, command):
        tokens = formated(command).split(' ')[2:]
        number = len(tokens)

        if not number:
            return Family(1, 1)

        if number != 2:
            return False

        afi = AFI.fromString(tokens[0])
        if afi == AFI.undefined:
            return False

        safi = SAFI.fromString(tokens[1])
        if safi == SAFI.undefined:
            return False

        return Family(afi, safi)

    def api_operational(self, command):
        tokens = formated(command).split(' ')

        op = tokens[1].lower()
        what = tokens[2].lower()

        if op != 'operational':
            return False

        self.configuration.tokeniser.iterate.replenish(tokens[3:])
        # None or a class
        return operational(what, self.configuration.tokeniser.iterate)
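# Illustrative sketch (hypothetical helper): the token layout the api_*
# handlers above depend on. API commands are plain text lines such as
# 'announce route 192.0.2.0/24 next-hop 198.51.100.1'; api_route keeps the
# first word as the action and hands the remainder to the 'static' parser,
# while api_refresh and api_eor drop the first two words and expect an
# AFI/SAFI pair.
def _example_tokenise(command):
    """Split an API line the way api_route does: the action, then the rest."""
    action, line = command.split(' ', 1)
    return action, line


# _example_tokenise('announce route 192.0.2.0/24 next-hop 198.51.100.1')
# -> ('announce', 'route 192.0.2.0/24 next-hop 198.51.100.1')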
def check_neighbor(neighbors):
    logger = Logger()
    logger._option['parser'] = True

    logger.notice('\ndecoding routes in configuration', 'parser')

    for name in neighbors.keys():
        neighbor = neighbors[name]

        path = {}
        for f in NLRI.known_families():
            if neighbor.add_path:
                path[f] = neighbor.add_path

        capa = Capabilities().new(neighbor, False)
        if path:
            capa[Capability.CODE.ADD_PATH] = path
        capa[Capability.CODE.MULTIPROTOCOL] = neighbor.families()

        routerid_1 = str(neighbor.router_id)
        routerid_2 = '.'.join(
            str((int(_) + 1) % 250)
            for _ in str(neighbor.router_id).split('.', -1))

        o1 = Open(Version(4), ASN(neighbor.local_as), HoldTime(180), RouterID(routerid_1), capa)
        o2 = Open(Version(4), ASN(neighbor.peer_as), HoldTime(180), RouterID(routerid_2), capa)
        negotiated = Negotiated(neighbor)
        negotiated.sent(o1)
        negotiated.received(o2)
        # grouped = False

        for _ in neighbor.rib.outgoing.updates(False):
            pass

        for change1 in neighbor.rib.outgoing.cached_changes():
            str1 = change1.extensive()
            packed = list(Update([change1.nlri], change1.attributes).messages(negotiated))
            pack1 = packed[0]

            logger.debug('parsed route requires %d updates' % len(packed), 'parser')
            logger.debug('update size is %d' % len(pack1), 'parser')

            logger.debug('parsed route %s' % str1, 'parser')
            logger.debug('parsed hex %s' % od(pack1), 'parser')

            # This does not take the BGP header - let's assume we will not break that :)
            try:
                logger.debug('')  # new line

                pack1s = pack1[19:] if pack1.startswith(b'\xFF' * 16) else pack1
                update = Update.unpack_message(pack1s, negotiated)

                change2 = Change(update.nlris[0], update.attributes)
                str2 = change2.extensive()
                pack2 = list(Update([update.nlris[0]], update.attributes).messages(negotiated))[0]

                logger.debug('recoded route %s' % str2, 'parser')
                logger.debug('recoded hex %s' % od(pack2), 'parser')

                str1 = str1.replace('attribute [ 0x04 0x80 0x00000064 ]', 'med 100')
                str1r = str1.lower().replace(' med 100', '').replace(
                    ' local-preference 100', '').replace(' origin igp', '')
                str2r = str2.lower().replace(' med 100', '').replace(
                    ' local-preference 100', '').replace(' origin igp', '')
                str2r = str2r.replace(
                    'large-community [ 1:2:3 10:11:12 ]',
                    'attribute [ 0x20 0xc0 0x0000000100000002000000030000000a0000000b0000000c ]')

                if 'next-hop self' in str1r:
                    if ':' in str1r:
                        str1r = str1r.replace('next-hop self', 'next-hop ::1')
                    else:
                        str1r = str1r.replace('next-hop self', 'next-hop %s' % neighbor.local_address)

                if ' name ' in str1r:
                    parts = str1r.split(' ')
                    pos = parts.index('name')
                    str1r = ' '.join(parts[:pos] + parts[pos + 2:])

                skip = False

                if str1r != str2r:
                    if 'attribute [' in str1r and ' 0x00 ' in str1r:
                        # we do not decode non-transitive attributes
                        logger.debug('skipping string check on update with non-transitive attribute(s)', 'parser')
                        skip = True
                    else:
                        logger.debug('strings are different:', 'parser')
                        logger.debug('[%s]' % (str1r), 'parser')
                        logger.debug('[%s]' % (str2r), 'parser')
                        return False
                else:
                    logger.debug('strings are fine', 'parser')

                if skip:
                    logger.debug('skipping encoding for update with non-transitive attribute(s)', 'parser')
                elif pack1 != pack2:
                    logger.debug('encodings are different', 'parser')
                    logger.debug('[%s]' % (od(pack1)), 'parser')
                    logger.debug('[%s]' % (od(pack2)), 'parser')
                    return False
                else:
                    logger.debug('encoding is fine', 'parser')
                    logger.debug('----------------------------------------', 'parser')

                logger.debug('JSON nlri %s' % change1.nlri.json(), 'parser')
                logger.debug('JSON attr %s' % change1.attributes.json(), 'parser')
            except Notify as exc:
                logger.debug('----------------------------------------', 'parser')
                logger.debug(str(exc), 'parser')
                logger.debug('----------------------------------------', 'parser')
                return False
        neighbor.rib.clear()

    return True
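# Illustrative sketch (hypothetical encode/decode stand-ins): the round-trip
# property that check_neighbor verifies above. Encoding a parsed route,
# re-parsing the wire bytes and encoding again should reproduce the same
# bytes; any difference points at an encoder/decoder mismatch.
def _example_round_trip(route, encode, decode):
    """Return True when encode and decode are mutually consistent for `route`."""
    wire_first = encode(route)
    reparsed = decode(wire_first)
    wire_second = encode(reparsed)
    return wire_first == wire_second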