def __init__(self, configurations):
    # Reactor bootstrap: read every tunable from the environment once,
    # then build the long-lived singletons used by the event loop.
    self._ips = getenv().tcp.bind
    self._port = getenv().tcp.port
    self._stopping = getenv().tcp.once
    self.exit_code = self.Exit.unknown

    self.max_loop_time = getenv().reactor.speed
    # poll granularity: 1% of the maximum loop time
    self._sleep_time = self.max_loop_time / 100
    self._busyspin = {}
    self._ratelimit = {}
    self.early_drop = getenv().daemon.drop

    self.processes = None

    self.configuration = Configuration(configurations)
    self.asynchronous = ASYNC()
    self.signal = Signal()
    # NOTE: Daemon/Listener/API keep a back-reference to this reactor
    self.daemon = Daemon(self)
    self.listener = Listener(self)
    self.api = API(self)

    self._peers = {}

    self._saved_pid = False
    self._poller = select.poll()
def check_update(neighbor, raw):
    """Decode one or more raw BGP UPDATE messages for `neighbor` and log them.

    Returns True when every message parsed, False on the first failure.
    """
    option.enabled['parser'] = True
    negotiated = _negotiated(neighbor)

    while raw:
        if raw.startswith(b'\xff' * 16):
            kind = raw[18]
            # BGP header (RFC 4271): 16-octet marker, 2-octet length, 1-octet type.
            # BUGFIX: the high byte of the 16-bit length is shifted by 8, not 16.
            size = (raw[16] << 8) + raw[17]
            injected, raw = raw[19:size], raw[size:]
            if kind == 2:
                log.debug('the message is an update', 'parser')
                decoding = 'update'
            else:
                log.debug('the message is not an update (%d) - aborting' % kind, 'parser')
                return False
        else:
            log.debug('header missing, assuming this message is ONE update', 'parser')
            decoding = 'update'
            # BUGFIX: keep the buffer type consistent (bytes, not str)
            injected, raw = raw, b''

        try:
            # This does not take the BGP header - let's assume we will not break that :)
            update = Update.unpack_message(injected, Direction.IN, negotiated)
        except Notify:
            import traceback
            log.error('could not parse the message', 'parser')
            log.error(traceback.format_exc(), 'parser')
            if getenv().debug.pdb:
                raise
            return False
        except Exception:
            import traceback
            log.error('could not parse the message', 'parser')
            log.error(traceback.format_exc(), 'parser')
            if getenv().debug.pdb:
                raise
            return False

        log.debug('', 'parser')  # new line
        # idiom: iterate the NLRIs directly instead of indexing by range(len(...))
        for nlri in update.nlris:
            change = Change(nlri, update.attributes)
            log.info('decoded %s %s %s' % (decoding, change.nlri.action, change.extensive()), 'parser')
        log.info(
            'update json %s' % Response.JSON(json_version).update(neighbor, 'in', update, None, '', ''),
            'parser')

    return True
def __init__(self, reactor):
    # Daemonisation settings come from the environment; keep a reference
    # to the reactor this daemon will detach on behalf of.
    self.pid = getenv().daemon.pid
    self.user = getenv().daemon.user
    self.daemonize = getenv().daemon.daemonize
    self.umask = getenv().daemon.umask

    self.reactor = reactor

    # change to the root directory so we never keep a mountpoint busy
    os.chdir('/')
    os.umask(self.umask)
def __init__(self):
    # reset all runtime state first, then read the API process tunables
    self.clean()
    self.silence = False
    self._buffer = {}
    self._configuration = {}
    self._restart = {}

    # how many times a dead API process is restarted (0 disables respawning)
    self.respawn_number = 5 if getenv().api.respawn else 0
    self.terminate_on_error = getenv().api.terminate
    self.ack = getenv().api.ack
def check_nlri(neighbor, routes):
    """Parse a hexadecimal string of NLRIs for the neighbor's first family.

    Returns True when the whole buffer decodes, False otherwise.
    """
    option.enabled['parser'] = True

    announced = _hexa(routes)
    negotiated = _negotiated(neighbor)

    afi, safi = neighbor.families()[0]

    # Is the peer going to send us some Path Information with the route (AddPath)
    addpath = negotiated.addpath.send(afi, safi)

    nlris = []
    try:
        while announced:
            log.debug('parsing NLRI %s' % announced, 'parser')
            # unpack consumes the buffer one NLRI at a time
            nlri, announced = NLRI.unpack_nlri(afi, safi, announced, IN.ANNOUNCED, addpath)
            nlris.append(nlri)
    except Exception as exc:
        log.error('could not parse the nlri', 'parser')
        from exabgp.debug import string_exception
        log.error(string_exception(exc), 'parser')
        if getenv().debug.pdb:
            raise
        return False

    log.debug('', 'parser')  # new line
    for nlri in nlris:
        log.info('nlri json %s' % nlri.json(), 'parser')
    return True
def daemonise(self):
    """Detach from the controlling terminal using the classic double-fork.

    No-op when daemonisation is disabled, when logs go to stdout/stderr,
    or when we are already supervised (stdin is a socket / parent is init).
    """
    if not self.daemonize:
        return

    logging = getenv().log
    if logging.enable and logging.destination.lower() in ('stdout', 'stderr'):
        # BUGFIX: report the destination we actually tested (the environment's
        # setting), not an attribute looked up on the logger object
        log.critical('ExaBGP can not fork when logs are going to %s' % logging.destination.lower(), 'daemon')
        return

    def fork_exit():
        # fork and let the parent exit immediately; the child carries on
        try:
            pid = os.fork()
            if pid > 0:
                os._exit(0)
        except OSError as exc:
            log.critical('can not fork, errno %d : %s' % (exc.errno, exc.strerror), 'daemon')

    # do not detach if we are already supervised or run by init like process
    if self._is_socket(sys.__stdin__.fileno()) or os.getppid() == 1:
        return

    fork_exit()
    os.setsid()
    fork_exit()
    self.silence()
def callback():
    # Generator streaming the selected RIB back to the API process in
    # chunks, yielding between chunks so the reactor is never blocked
    # for long on a large table.
    lines_per_yield = getenv().api.chunk
    if last in ('routes', 'extensive', 'static', 'flow', 'l2vpn'):
        peers = reactor.peers()
    else:
        peers = [n for n in reactor.peers() if 'neighbor %s' % last in n]
    for key in peers:
        # NOTE(review): 'neighor_rib' looks like a typo for 'neighbor_rib' -
        # confirm against the Reactor API before renaming
        routes = reactor.neighor_rib(key, rib_name, advertised)
        while routes:
            changes, routes = routes[:lines_per_yield], routes[lines_per_yield:]
            for change in changes:
                if isinstance(change.nlri, route_type):
                    if extensive:
                        reactor.processes.write(
                            service,
                            '%s %s %s' % (reactor.neighbor_name(key), '%s %s' % change.nlri.family(), change.extensive()),
                        )
                    else:
                        reactor.processes.write(
                            service,
                            'neighbor %s %s %s' % (reactor.neighbor_ip(key), '%s %s' % change.nlri.family(), str(change.nlri)),
                        )
            yield True
    reactor.processes.answer_done(service)
def cmdline(cmdarg):
    # Decode a raw BGP packet (hex string) against either an explicit
    # configuration file, a provided family list, or the permissive
    # built-in configuration. Returns 0 on success, 1 on failure.
    route = ''.join(cmdarg.payload).replace(' ', '')
    if not is_bgp(route):
        # parser.print_usage()
        sys.stdout.write('Environment values are:\n%s\n\n' % '\n'.join(' - %s' % _ for _ in Env.default()))
        sys.stdout.write('The BGP message must be an hexadecimal string.\n\n')
        sys.stdout.write('All colons or spaces are ignored, for example:\n\n')
        sys.stdout.write(' 001E0200000007900F0003000101\n')
        sys.stdout.write(' 001E:02:0000:0007:900F:0003:0001:01\n')
        sys.stdout.write(' FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101\n')
        sys.stdout.write(' FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01\n')
        sys.stdout.write(" FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101\n")
        sys.stdout.flush()
        sys.exit(1)

    env = getenv()
    env.bgp.passive = True
    env.log.parser = True
    env.tcp.bind = ''

    if cmdarg.debug:
        env.log.all = True
        env.log.level = 'DEBUG'

    if cmdarg.pdb:
        env.debug.pdb = True

    log.init(env)
    trace_interceptor(env.debug.pdb)

    sanitized = ''.join(cmdarg.payload).replace(':', '').replace(' ', '')

    if cmdarg.configuration:
        configuration = Configuration([getconf(cmdarg.configuration)])
    elif cmdarg.family:
        # families come as whitespace-separated "<afi> <safi>" pairs
        families = cmdarg.family.split()
        if len(families) % 2:
            sys.stdout.write('families provided are invalid')
            sys.stdout.flush()
            sys.exit(1)
        families_pair = [families[n:n + 2] for n in range(0, len(families), 2)]
        families_text = ';'.join([f'{a} {s}' for a, s in families_pair])
        conf = conf_none.replace('[families]', families_text)
        configuration = Configuration([conf], text=True)
    else:
        configuration = Configuration([conf_all], text=True)

    valid_nlri = Reactor(configuration).check(sanitized, cmdarg.nlri)
    if valid_nlri:
        return 0
    return 1
def reload(self):
    """Reload the configuration, converting parse failures into error reports.

    Returns self._reload()'s result on success; on failure returns the
    result of self.error.set(...) describing where parsing stopped.
    """
    try:
        return self._reload()
    except KeyboardInterrupt:
        return self.error.set('configuration reload aborted by ^C or SIGINT')
    # BUGFIX(cleanup): the original had two byte-identical handlers for
    # Error and Exception; a single tuple catch covers the same set
    except (Error, Exception) as exc:
        if getenv().debug.configuration:
            raise
        return self.error.set(
            'problem parsing configuration file line %d\n'
            'error message: %s' % (self.tokeniser.index_line, exc))
def active_peers(self):
    """Return the set of peer keys that should currently be given CPU time.

    Nothing is active in globally passive mode; a passive neighbor with no
    protocol instance yet is waiting for an inbound connection and is skipped.
    """
    if getenv().bgp.passive:
        return set()
    return {
        key
        for key, peer in self._peers.items()
        if peer.proto or not peer.neighbor['passive']
    }
def _establish(self):
    # Generator driving the BGP FSM from ACTIVE to ESTABLISHED, yielding
    # ACTION values back to the reactor between protocol steps.

    # try to establish the outgoing connection
    self.fsm.change(FSM.ACTIVE)

    if getenv().bgp.passive:
        # globally passive: wait for an inbound connection to appear
        while not self.proto:
            yield ACTION.LATER

    self.fsm.change(FSM.IDLE)

    if not self.proto:
        for action in self._connect():
            if action in ACTION.ALL:
                yield action
    self.fsm.change(FSM.CONNECT)

    # normal sending of OPEN first ...
    if self.neighbor['local-as']:
        for sent_open in self._send_open():
            if sent_open in ACTION.ALL:
                yield sent_open
        self.proto.negotiated.sent(sent_open)
        self.fsm.change(FSM.OPENSENT)

    # read the peer's open
    for received_open in self._read_open():
        if received_open in ACTION.ALL:
            yield received_open
    self.proto.negotiated.received(received_open)

    self.proto.connection.msg_size = self.proto.negotiated.msg_size

    # if we mirror the ASN, we need to read first and send second
    if not self.neighbor['local-as']:
        for sent_open in self._send_open():
            if sent_open in ACTION.ALL:
                yield sent_open
        self.proto.negotiated.sent(sent_open)
        self.fsm.change(FSM.OPENSENT)

    self.proto.validate_open()
    self.fsm.change(FSM.OPENCONFIRM)

    self.recv_timer = ReceiveTimer(self.proto.connection.session, self.proto.negotiated.holdtime, 4, 0)
    for action in self._send_ka():
        yield action
    for action in self._read_ka():
        yield action
    self.fsm.change(FSM.ESTABLISHED)
    self.stats['complete'] = time.time()

    # let the caller know that we were successful
    yield ACTION.NOW
def __init__(self): dict.__init__(self) # cached representation of the object self._str = '' self._idx = '' self._json = '' # The parsed attributes have no mp routes and/or those are last self.cacheable = True # XXX: FIXME: surely not the best place for this Attribute.caching = getenv().cache.attributes
def __init__(self, afi, peer, local):
    # Per-connection state: message sizing, endpoints and pollers.
    self.msg_size = ExtendedMessage.INITIAL_SIZE
    self.defensive = getenv().debug.defensive

    self.afi = afi
    self.peer = peer
    self.local = local

    # actual socket object, set once the connection is made
    self.io = None
    self.established = False
    self._rpoller = {}
    self._wpoller = {}

    # per-direction connection counter used for session identification
    self.id = self.identifier.get(self.direction, 1)
def __init__(self, neighbor, reactor):
    """Per-neighbor session holder tracking the FSM, timers and flags."""
    # We only to try to connect via TCP once
    self.once = getenv().tcp.once
    # idiom: bool() instead of 'True if ... else False'
    self.bind = bool(getenv().tcp.bind)

    now = time.time()

    self.reactor = reactor
    self.neighbor = neighbor
    # The next restart neighbor definition
    self._neighbor = None

    self.proto = None
    self.fsm = FSM(self, FSM.IDLE)
    self.stats = {
        'fsm': self.fsm,
        'creation': now,
        'reset': now,
        'complete': 0,
    }
    self.generator = None

    # The peer should restart after a stop
    self._restart = True
    # The peer was restarted (to know what kind of open to send for graceful restart)
    self._restarted = FORCE_GRACEFUL

    # We want to remove routes which are not in the configuration anymore after a signal to reload
    self._reconfigure = True
    # We want to send all the known routes
    self._resend_routes = SEND.DONE

    # We have been asked to teardown the session with this code
    self._teardown = None

    self._delay = Delay()
    self.recv_timer = None
def cmdline(cmdarg):
    """Validate one or more configuration files, optionally re-checking
    every neighbor definition and every route; exits 1 on the first error."""
    env = getenv()

    # Must be done before setting the logger as it modify its behaviour
    if cmdarg.verbose:
        env.log.all = True
        env.log.level = syslog.LOG_DEBUG

    log.init()

    if cmdarg.pdb:
        env.debug.pdb = True

    if cmdarg.verbose:
        env.log.parser = True

    for configuration in cmdarg.configuration:
        log.notice(f'loading {configuration}', 'configuration')
        location = getconf(configuration)
        if not location:
            log.critical(f'{configuration} is not an exabgp config file', 'configuration')
            sys.exit(1)

        config = Reactor([location]).configuration

        if not config.reload():
            log.critical(f'{configuration} is not a valid config file', 'configuration')
            sys.exit(1)
        log.info('\u2713 loading', 'configuration')

        if cmdarg.neighbor:
            log.notice('checking neighbors', 'configuration')
            for name, neighbor in config.neighbors.items():
                reparsed = neighbor.string()
                for line in reparsed.split('\n'):
                    # BUGFIX: the log source is the literal category name,
                    # as in every other call here - not the file being checked
                    log.debug(line, 'configuration')
                log.info(f'\u2713 neighbor {name.split()[1]}', 'configuration')

        if cmdarg.route:
            log.notice('checking routes', 'configuration')
            if not check_generation(config.neighbors):
                log.critical(f'{configuration} has an invalid route', 'configuration')
                sys.exit(1)
            log.info('\u2713 routes', 'configuration')
def __init__(self, peer):
    """Protocol instance bound to one peer: negotiation state and TCP port.

    Port resolution order: the neighbor's explicit 'connect' option, then
    the 'exabgp.tcp.port' / 'exabgp_tcp_port' environment variables, then
    the well-known BGP port 179.
    """
    self.peer = peer
    self.neighbor = peer.neighbor
    self.negotiated = Negotiated(self.neighbor)
    self.connection = None

    if self.neighbor['connect']:
        self.port = self.neighbor['connect']
    else:
        # idiom: read each environment variable once instead of twice
        for key in ('exabgp.tcp.port', 'exabgp_tcp_port'):
            value = os.environ.get(key, '')
            if value.isdigit():
                self.port = int(value)
                break
        else:
            self.port = 179

    from exabgp.environment import getenv

    self.log_routes = peer.neighbor['adj-rib-in'] or getenv().log.routes
def _read_open(self):
    # Generator waiting for the peer's OPEN, bounded by the configured
    # open-wait timer; yields control back to the reactor while waiting.
    wait = getenv().bgp.openwait
    opentimer = ReceiveTimer(
        self.proto.connection.session, wait, 1, 1,
        'waited for open too long, we do not like stuck in active')
    # Only yield if we have not the open, otherwise the reactor can run the other connection
    # which would be bad as we need to do the collission check without going to the other peer
    for message in self.proto.read_open(self.neighbor.peer_address.top()):
        opentimer.check_ka(message)
        # XXX: FIXME: change the whole code to use the ord and not the chr version
        # Only yield if we have not the open, otherwise the reactor can run the other connection
        # which would be bad as we need to do the collission check
        if message.ID == Message.CODE.NOP:
            # If a peer does not reply to OPEN message, or not enough bytes
            # yielding ACTION.NOW can cause ExaBGP to busy spin trying to
            # read from peer. See GH #723 .
            yield ACTION.LATER
    # finally hand the received OPEN message itself to the caller
    yield message
def init(self):
    """Configure the logger from the environment: level, short format and
    the per-category enable flags; then (re)open the destination."""
    env = getenv()
    self.short = env.log.short
    self.level = env.log.level

    # each logger category maps to one environment flag; a category is on
    # when logging is enabled and either 'all' or its own flag is set
    category_flags = {
        'reactor': env.log.reactor,
        'daemon': env.log.daemon,
        'processes': env.log.processes,
        'configuration': env.log.configuration,
        'network': env.log.network,
        'wire': env.log.packets,
        'message': env.log.message,
        'rib': env.log.rib,
        'timer': env.log.timers,
        'routes': env.log.routes,
        'parser': env.log.parser,
    }
    options = {'pdb': env.debug.pdb}
    for category, flag in category_flags.items():
        options[category] = env.log.enable and (env.log.all or flag)
    self._option = options

    if not env.log.enable:
        self.destination = ''
        return

    self.destination = env.log.destination
    self.restart(True)
def cmdline(cmdarg):
    # Decode a raw BGP packet (hex string) against the provided
    # configuration file.
    route = ''.join(cmdarg.payload).replace(' ', '')
    if not is_bgp(route):
        # parser.print_usage()
        sys.stdout.write('Environment values are:\n%s\n\n' % '\n'.join(' - %s' % _ for _ in Env.default()))
        sys.stdout.write('The BGP message must be an hexadecimal string.\n\n')
        sys.stdout.write('All colons or spaces are ignored, for example:\n\n')
        sys.stdout.write(' 001E0200000007900F0003000101\n')
        sys.stdout.write(' 001E:02:0000:0007:900F:0003:0001:01\n')
        sys.stdout.write(' FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101\n')
        sys.stdout.write(' FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01\n')
        sys.stdout.write(" FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101\n")
        sys.stdout.flush()
        sys.exit(1)

    env = getenv()
    env.bgp.passive = True
    env.log.parser = True
    env.tcp.bind = ''

    if cmdarg.debug:
        env.log.all = True
        env.log.level = 'DEBUG'

    if cmdarg.pdb:
        env.debug.pdb = True

    log.init(env)
    trace_interceptor(env.debug.pdb)

    sanitized = ''.join(cmdarg.payload).replace(':', '').replace(' ', '')
    # NOTE(review): unlike the sibling decode command, check()'s result is
    # discarded here and no exit status is returned - confirm intended
    Reactor([getconf(cmdarg.configuration)]).check(sanitized)
def cmdline(cmdarg):
    # Run the reactor with the provided configuration, replaying the given
    # raw BGP packet (hex string) through env.debug.route.
    route = ''.join(cmdarg.payload).replace(' ', '')
    if not is_bgp(route):
        # parser.print_usage()
        sys.stdout.write('Environment values are:\n%s\n\n' % '\n'.join(' - %s' % _ for _ in Env.default()))
        sys.stdout.write('The BGP message must be an hexadecimal string.\n\n')
        sys.stdout.write('All colons or spaces are ignored, for example:\n\n')
        sys.stdout.write(' 001E0200000007900F0003000101\n')
        sys.stdout.write(' 001E:02:0000:0007:900F:0003:0001:01\n')
        sys.stdout.write(' FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101\n')
        sys.stdout.write(' FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01\n')
        sys.stdout.write(" FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101\n")
        sys.stdout.flush()
        sys.exit(1)

    env = getenv()
    env.log.parser = True
    env.debug.route = route
    env.tcp.bind = ''

    if cmdarg.debug:
        env.log.all = True
        env.log.level = syslog.LOG_DEBUG

    if cmdarg.pdb:
        env.debug.pdb = True

    log.init()
    Reactor([cmdarg.configuration]).run(False, ROOT)
def cmdline(cmdarg):
    """Send one command to a running ExaBGP through its named pipes and
    relay the answer to stdout.

    Exits with 0 on success, 1 on any pipe or communication error.
    """
    pipename = getenv().api.pipename
    command = cmdarg.command

    pipes = named_pipe(ROOT, pipename)
    if len(pipes) != 1:
        sys.stdout.write('could not find ExaBGP\'s named pipes (%s.in and %s.out) for the cli\n' % (pipename, pipename))
        sys.stdout.write('we scanned the following folders (the number is your PID):\n - ')
        sys.stdout.write('\n - '.join(pipes))
        sys.stdout.flush()
        sys.exit(1)

    send = pipes[0] + pipename + '.in'
    recv = pipes[0] + pipename + '.out'

    if not check_fifo(send):
        sys.stdout.write('could not find write named pipe to connect to ExaBGP')
        sys.stdout.flush()
        sys.exit(1)

    if not check_fifo(recv):
        sys.stdout.write('could not find read named pipe to connect to ExaBGP')
        sys.stdout.flush()
        sys.exit(1)

    reader = open_reader(recv)

    rbuffer = b''
    start = time.time()
    while True:
        try:
            # drain any stale answer data left over from a previous command
            while select.select([reader], [], [], 0) != ([], [], []):
                rbuffer += os.read(reader, 4096)
                rbuffer = rbuffer[-AnswerStream.buffer_size:]
        # BUGFIX: IOError is an alias of OSError on Python 3, so the second
        # of the two original handlers was unreachable dead code - and it
        # crashed by passing the exception object to sys.stdout.write()
        except OSError as exc:
            if exc.errno in error.block:
                continue
            sys.stdout.write('could not clear named pipe from potential previous command data (%s)' % str(exc))
            sys.stdout.flush()
            sys.exit(1)

        # we are not ack'ing the command and probably have read all there is
        if time.time() > start + 1.5:
            break

        # we read nothing, nothing to do
        if not rbuffer:
            break

        # we read some data but it is not ending by a new line (ie: not a command completion)
        if rbuffer[-1] != 10:  # \n
            continue
        if AnswerStream.done.endswith(rbuffer[-len(AnswerStream.done):]):
            break
        if AnswerStream.error.endswith(rbuffer[-len(AnswerStream.error):]):
            break
        if AnswerStream.shutdown.endswith(rbuffer[-len(AnswerStream.shutdown):]):
            break

    # expand single-letter nicknames ('a', 's', ...) into full command words
    renamed = ['']
    for pos, token in enumerate(command):
        for nickname, name, match in (
            ('a', 'announce', lambda pos, pre: pos == 0 or pre.count('.') == 3 or pre.count(':') != 0),
            ('a', 'attributes', lambda pos, pre: pre[-1] == 'announce' or pre[-1] == 'withdraw'),
            ('c', 'configuration', lambda pos, pre: True),
            ('e', 'eor', lambda pos, pre: pre[-1] == 'announce'),
            ('e', 'extensive', lambda _, pre: 'show' in pre),
            ('f', 'flow', lambda pos, pre: pre[-1] == 'announce' or pre[-1] == 'withdraw'),
            ('f', 'flush', lambda pos, pre: pos == 0 or pre.count('.') == 3 or pre.count(':') != 0),
            ('h', 'help', lambda pos, pre: pos == 0),
            ('i', 'in', lambda pos, pre: pre[-1] == 'adj-rib'),
            ('n', 'neighbor', lambda pos, pre: pos == 0 or pre[-1] == 'show'),
            # NOTE(review): these two matchers compare the list 'pre' against a
            # string and can never be True - confirm the intended condition
            ('r', 'route', lambda pos, pre: pre == 'announce' or pre == 'withdraw'),
            ('rr', 'route-refresh', lambda _, pre: pre == 'announce'),
            ('s', 'show', lambda pos, pre: pos == 0),
            ('t', 'teardown', lambda pos, pre: pos == 0 or pre.count('.') == 3 or pre.count(':') != 0),
            ('s', 'summary', lambda pos, pre: pos != 0),
            ('v', 'vps', lambda pos, pre: pre[-1] == 'announce' or pre[-1] == 'withdraw'),
            ('o', 'operation', lambda pos, pre: pre[-1] == 'announce'),
            ('o', 'out', lambda pos, pre: pre[-1] == 'adj-rib'),
            ('a', 'adj-rib', lambda pos, pre: pre[-1] in ['clear', 'flush', 'show']),
            ('w', 'withdraw', lambda pos, pre: pos == 0 or pre.count('.') == 3 or pre.count(':') != 0),
            ('w', 'watchdog', lambda pos, pre: pre[-1] == 'announce' or pre[-1] == 'withdraw'),
            ('neighbour', 'neighbor', lambda pos, pre: True),
            ('neigbour', 'neighbor', lambda pos, pre: True),
            ('neigbor', 'neighbor', lambda pos, pre: True),
        ):
            if (token == nickname or name.startswith(token)) and match(pos, renamed):
                renamed.append(name)
                break
        else:
            renamed.append(token)

    sending = ' '.join(renamed).strip()

    # This does not change the behaviour for well formed command
    if sending != command:
        print('command: %s' % sending)

    writer = open_writer(send)
    try:
        os.write(writer, sending.encode('utf-8') + b'\n')
        os.close(writer)
    # BUGFIX: merged the two identical handlers (IOError aliases OSError)
    except OSError as exc:
        sys.stdout.write('could not send command to ExaBGP (%s)' % str(exc))
        sys.stdout.flush()
        sys.exit(1)

    if command == 'reset':
        sys.exit(0)

    waited = 0.0
    buf = b''
    done = False
    done_time_diff = 0.5
    while not done:
        try:
            r, _, _ = select.select([reader], [], [], 0.01)
        # BUGFIX: merged the two identical handlers (IOError aliases OSError)
        except OSError as exc:
            if exc.errno in error.block:
                continue
            sys.stdout.write('could not get answer from ExaBGP (%s)' % str(exc))
            sys.stdout.flush()
            sys.exit(1)

        if waited > 5.0:
            sys.stderr.write('\n')
            sys.stderr.write('warning: no end of command message received\n')
            sys.stderr.write('warning: normal if exabgp.api.ack is set to false otherwise some data may get stuck on the pipe\n')
            sys.stderr.write('warning: otherwise it may cause exabgp reactor to block\n')
            sys.exit(0)
        elif not r:
            waited += 0.01
            continue
        else:
            waited = 0.0

        try:
            raw = os.read(reader, 4096)
        # BUGFIX: merged the two identical handlers (IOError aliases OSError)
        except OSError as exc:
            if exc.errno in error.block:
                continue
            sys.stdout.write('could not read answer from ExaBGP (%s)' % str(exc))
            sys.stdout.flush()
            sys.exit(1)

        buf += raw
        while b'\n' in buf:
            line, buf = buf.split(b'\n', 1)
            string = line.decode()
            if string == Answer.done:
                done = True
                break
            if string == Answer.shutdown:
                sys.stderr.write('ExaBGP is shutting down, command aborted\n')
                sys.stderr.flush()
                done = True
                break
            if string == Answer.error:
                done = True
                sys.stderr.write('ExaBGP returns an error (see ExaBGP\'s logs for more information)\n')
                sys.stderr.write('use help for a list of available commands\n')
                sys.stderr.flush()
                break
            sys.stdout.write('%s\n' % string)
            sys.stdout.flush()

        if not getenv().api.ack and not raw.decode():
            # without ack, fall back to the pipe's mtime to detect completion
            this_moment = time.time()
            recv_epoch_time = os.path.getmtime(recv)
            time_diff = this_moment - recv_epoch_time
            if time_diff >= done_time_diff:
                done = True

    try:
        os.close(reader)
    except Exception:
        pass

    sys.exit(0)
Created by Thomas Mangin on 2009-09-06. Copyright (c) 2009-2015 Exa Networks. All rights reserved. License: 3-clause BSD. (See the COPYRIGHT file) """ import unittest import os import sys import glob from exabgp.configuration.configuration import Configuration from exabgp.environment import getenv environ = getenv() environ.log.enable = True environ.log.all = False environ.log.configuration = False environ.log.parser = False class TestControl(unittest.TestCase): def setUp(self): location = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', 'conf', '*.conf')) self.files = glob.glob(location) # These files contains invalid attribute we can not parse skip = 'attributes.conf'
def check_update(neighbor, raw):
    """Decode raw BGP UPDATE message(s) against a locally simulated session.

    Builds a pair of OPEN messages from the neighbor definition to obtain
    the Negotiated state, then parses and logs every UPDATE in `raw`.
    Returns True when everything parsed, False on the first failure.
    """
    option.enabled['parser'] = True

    path = {}
    for f in NLRI.known_families():
        if neighbor['capability']['add-path']:
            path[f] = neighbor['capability']['add-path']

    capa = Capabilities().new(neighbor, False)
    capa[Capability.CODE.ADD_PATH] = path
    capa[Capability.CODE.MULTIPROTOCOL] = neighbor.families()
    # capa[Capability.CODE.FOUR_BYTES_ASN] = True

    routerid_1 = str(neighbor['router-id'])
    # derive a distinct router-id for the simulated remote speaker
    routerid_2 = '.'.join(str((int(_) + 1) % 250) for _ in str(neighbor['router-id']).split('.', -1))

    o1 = Open(Version(4), ASN(neighbor['local-as']), HoldTime(180), RouterID(routerid_1), capa)
    o2 = Open(Version(4), ASN(neighbor['peer-as']), HoldTime(180), RouterID(routerid_2), capa)
    negotiated = Negotiated(neighbor)
    negotiated.sent(o1)
    negotiated.received(o2)
    # grouped = False

    while raw:
        if raw.startswith(b'\xff' * 16):
            kind = raw[18]
            # BGP header (RFC 4271): 16-octet marker, 2-octet length, 1-octet type.
            # BUGFIX: the high byte of the 16-bit length is shifted by 8, not 16.
            size = (raw[16] << 8) + raw[17]
            injected, raw = raw[19:size], raw[size:]
            if kind == 2:
                log.debug('the message is an update', 'parser')
                decoding = 'update'
            else:
                log.debug('the message is not an update (%d) - aborting' % kind, 'parser')
                return False
        else:
            log.debug('header missing, assuming this message is ONE update', 'parser')
            decoding = 'update'
            # BUGFIX: keep the buffer type consistent (bytes, not str)
            injected, raw = raw, b''

        try:
            # This does not take the BGP header - let's assume we will not break that :)
            update = Update.unpack_message(injected, Direction.IN, negotiated)
        except Notify:
            import traceback
            log.error('could not parse the message', 'parser')
            log.error(traceback.format_exc(), 'parser')
            if getenv().debug.pdb:
                raise
            return False
        except Exception:
            import traceback
            log.error('could not parse the message', 'parser')
            log.error(traceback.format_exc(), 'parser')
            if getenv().debug.pdb:
                raise
            return False

        log.debug('', 'parser')  # new line
        # idiom: iterate the NLRIs directly instead of indexing by range(len(...))
        for nlri in update.nlris:
            change = Change(nlri, update.attributes)
            log.info('decoded %s %s %s' % (decoding, change.nlri.action, change.extensive()), 'parser')
        log.info(
            'update json %s' % Response.JSON(json_version).update(neighbor, 'in', update, None, '', ''),
            'parser')

    return True
def post(self):
    """Finalise the neighbor currently in scope: apply templates and
    defaults, negotiate families/add-path/nexthop, attach routes, and
    register the neighbor (one object per family under multi-session).

    Returns True on success; otherwise the result of self.error.set(...).
    """
    for inherited in self.scope.pop('inherit', []):
        data = self.scope.template('neighbor', inherited)
        self.scope.inherit(data)

    local = self.scope.get()

    neighbor = Neighbor()

    for option in neighbor.defaults:
        conf = local.get(option, None)
        if conf is not None:
            neighbor[option] = conf

    # XXX: use the right class for the data type
    # XXX: we can use the scope.nlri interface ( and rename it ) to set some values
    capability = local.get('capability', {})
    for option in neighbor.Capability.defaults:
        conf = capability.get(option, None)
        if conf is not None:
            neighbor['capability'][option] = conf

    neighbor.api = ParseAPI.flatten(local.pop('api', {}))

    missing = neighbor.missing()
    if missing:
        return self.error.set(missing)
    neighbor.infer()

    # collect the explicitly configured families, defaulting to all known
    families = []
    for family in ParseFamily.convert:
        for pair in local.get('family', {}).get(family, []):
            families.append(pair)

    families = families or NLRI.known_families()

    for family in families:
        neighbor.add_family(family)

    if neighbor['capability']['add-path']:
        add_path = local.get('add-path', {})
        if add_path:
            for family in ParseAddPath.convert:
                for pair in add_path.get(family, []):
                    if pair not in families:
                        log.debug('skipping add-path family ' + str(pair) + ' as it is not negotiated', 'configuration')
                        continue
                    neighbor.add_addpath(pair)
        else:
            for family in families:
                neighbor.add_addpath(family)

    # The default is to auto-detect by the presence of the nexthop block
    # if this is manually set, then we honor it
    nexthop = local.get('nexthop', {})
    if neighbor['capability']['nexthop'] is None and nexthop:
        neighbor['capability']['nexthop'] = True

    if neighbor['capability']['nexthop']:
        nexthops = []
        for family in nexthop:
            nexthops.extend(nexthop[family])
        if nexthops:
            for afi, safi, nhafi in nexthops:
                if (afi, safi) not in neighbor.families():
                    log.debug(
                        'skipping nexthop afi,safi ' + str(afi) + '/' + str(safi) + ' as it is not negotiated',
                        'configuration',
                    )
                    continue
                if (nhafi, safi) not in neighbor.families():
                    log.debug(
                        'skipping nexthop afi ' + str(nhafi) + '/' + str(safi) + ' as it is not negotiated',
                        'configuration',
                    )
                    continue
                neighbor.add_nexthop(afi, safi, nhafi)

    neighbor.changes = []
    neighbor.changes.extend(self.scope.pop_routes())

    # old format
    for section in ('static', 'l2vpn', 'flow'):
        routes = local.get(section, {}).get('routes', [])
        for route in routes:
            route.nlri.action = OUT.ANNOUNCE
        neighbor.changes.extend(routes)

    routes = local.get('routes', [])
    for route in routes:
        route.nlri.action = OUT.ANNOUNCE
    neighbor.changes.extend(routes)

    messages = local.get('operational', {}).get('routes', [])

    if neighbor['local-address'] is None:
        neighbor.auto_discovery = True
        neighbor['local-address'] = None
        neighbor['md5-ip'] = None

    if not neighbor['router-id']:
        if neighbor['peer-address'].afi == AFI.ipv4 and not neighbor.auto_discovery:
            neighbor['router-id'] = neighbor['local-address']
        else:
            return self.error.set('missing router-id for the peer, it can not be set using the local-ip')

    if neighbor['capability']['route-refresh']:
        if neighbor['adj-rib-out']:
            log.debug('route-refresh requested, enabling adj-rib-out', 'configuration')

    missing = neighbor.missing()
    if missing:
        return self.error.set('incomplete neighbor, missing %s' % missing)

    if not neighbor.auto_discovery and neighbor['local-address'].afi != neighbor['peer-address'].afi:
        return self.error.set('local-address and peer-address must be of the same family')
    neighbor.range_size = neighbor['peer-address'].mask.size()

    if neighbor.range_size > 1 and not (neighbor['passive'] or getenv().bgp.passive):
        return self.error.set('can only use ip ranges for the peer address with passive neighbors')

    if neighbor.index() in self._neighbors:
        return self.error.set('duplicate peer definition %s' % neighbor['peer-address'].top())
    self._neighbors.append(neighbor.index())

    if neighbor['md5-password']:
        try:
            md5 = base64.b64decode(neighbor['md5-password']) if neighbor['md5-base64'] else neighbor['md5-password']
        except TypeError as e:
            # NOTE(review): on Python 3 b64decode raises binascii.Error for bad
            # padding, which this TypeError handler would not catch - confirm
            return self.error.set(f"Invalid base64 encoding of MD5 password ({e})")
        else:
            if len(md5) > 80:
                return self.error.set('MD5 password must be no larger than 80 characters')

    # check we are not trying to announce routes without the right MP announcement
    for change in neighbor.changes:
        family = change.nlri.family()
        if family not in families and family != (AFI.ipv4, SAFI.unicast):
            return self.error.set(
                'Trying to announce a route of type %s,%s when we are not announcing the family to our peer'
                % change.nlri.family())

    def _init_neighbor(neighbor):
        # attach watchdog routes and API/operational messages, then register
        families = neighbor.families()
        for change in neighbor.changes:
            if change.nlri.family() in families:
                # This add the family to neighbor.families()
                neighbor.rib.outgoing.add_to_rib_watchdog(change)
        for message in messages:
            if message.family() in families:
                if message.name == 'ASM':
                    neighbor.asm[message.family()] = message
                else:
                    neighbor.messages.append(message)
        self.neighbors[neighbor.name()] = neighbor

    # create one neighbor object per family for multisession
    if neighbor['capability']['multi-session'] and len(neighbor.families()) > 1:
        for family in neighbor.families():
            # XXX: FIXME: Ok, it works but it takes LOTS of memory ..
            m_neighbor = deepcopy(neighbor)
            m_neighbor.make_rib()
            m_neighbor.rib.outgoing.families = [family]
            _init_neighbor(m_neighbor)
    else:
        neighbor.make_rib()
        _init_neighbor(neighbor)

    local.clear()
    return True
def run(self):
    """Main reactor loop: daemonise, bind listeners, load the configuration,
    drop privileges, then poll peers/API processes until told to stop.

    Returns an Exit code describing why the reactor terminated.
    """
    self.daemon.daemonise()

    # Make sure we create processes once we have closed file descriptor
    # unfortunately, this must be done before reading the configuration file
    # so we can not do it with dropped privileges
    self.processes = Processes()

    # we have to read the configuration possibly with root privileges
    # as we need the MD5 information when we bind, and root is needed
    # to bind to a port < 1024
    # this is undesirable as :
    # - handling user generated data as root should be avoided
    # - we may not be able to reload the configuration once the privileges are dropped
    # but I can not see any way to avoid it
    for ip in self._ips:
        if not self.listener.listen_on(ip, None, self._port, None, False, None):
            return self.Exit.listening

    if not self.reload():
        return self.Exit.configuration

    for neighbor in self.configuration.neighbors.values():
        if neighbor['listen']:
            if not self.listener.listen_on(
                neighbor['md5-ip'],
                neighbor['peer-address'],
                neighbor['listen'],
                neighbor['md5-password'],
                neighbor['md5-base64'],
                neighbor['incoming-ttl'],
            ):
                return self.Exit.listening

    if not self.early_drop:
        self.processes.start(self.configuration.processes)

    if not self.daemon.drop_privileges():
        log.critical('could not drop privileges to \'%s\' refusing to run as root' % self.daemon.user, 'reactor')
        log.critical('set the environmemnt value exabgp.daemon.user to change the unprivileged user', 'reactor')
        return self.Exit.privileges

    if self.early_drop:
        self.processes.start(self.configuration.processes)

    # This is required to make sure we can write in the log location as we now have dropped root privileges
    log.init(getenv())

    if not self.daemon.savepid():
        return self.Exit.pid

    # optional alignment of the first connection attempt on a wall-clock boundary
    wait = getenv().tcp.delay
    if wait:
        sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
        log.debug('waiting for %d seconds before connecting' % sleeptime, 'reactor')
        time.sleep(float(sleeptime))

    workers = {}
    peers = set()
    api_fds = []
    ms_sleep = int(self._sleep_time * 1000)

    while True:
        try:
            if self.signal.received:
                signaled = self.signal.received

                # report that we received a signal
                for key in self._peers:
                    if self._peers[key].neighbor.api['signal']:
                        self._peers[key].reactor.processes.signal(self._peers[key].neighbor, self.signal.number)

                self.signal.rearm()

                # we always want to exit
                if signaled == Signal.SHUTDOWN:
                    self.exit_code = self.Exit.normal
                    self.shutdown()
                    break

                # it does mot matter what we did if we are restarting
                # as the peers and network stack are replaced by new ones
                if signaled == Signal.RESTART:
                    self.restart()
                    continue

                # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
                if self._pending_adjribout():
                    continue

                if signaled == Signal.RELOAD:
                    self.reload()
                    self.processes.start(self.configuration.processes, False)
                    continue

                if signaled == Signal.FULL_RELOAD:
                    self.reload()
                    self.processes.start(self.configuration.processes, True)
                    continue

            if self.listener.incoming():
                # check all incoming connection
                self.asynchronous.schedule(str(uuid.uuid1()), 'checking for new connection(s)', self.listener.new_connections())

            sleep = ms_sleep

            # do not attempt to listen on closed sockets even if the peer is still here
            for io in list(workers.keys()):
                if io == -1:
                    self._poller.unregister(io)
                    del workers[io]

            peers = self.active_peers()
            # give a turn to all the peers
            for key in list(peers):
                peer = self._peers[key]

                # limit the number of message handling per second
                if self._rate_limited(key, peer.neighbor['rate-limit']):
                    peers.discard(key)
                    continue

                # handle the peer
                action = peer.run()

                # .run() returns an ACTION enum:
                # * immediate if it wants to be called again
                # * later if it should be called again but has no work atm
                # * close if it is finished and is closing down, or restarting
                if action == ACTION.CLOSE:
                    if key in self._peers:
                        del self._peers[key]
                    peers.discard(key)
                # we are loosing this peer, not point to schedule more process work
                elif action == ACTION.LATER:
                    io = peer.socket()
                    if io != -1:
                        self._poller.register(
                            io,
                            select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLNVAL | select.POLLERR)
                        workers[io] = key
                    # no need to come back to it before a a full cycle
                    peers.discard(key)
                elif action == ACTION.NOW:
                    sleep = 0

                if not peers:
                    break

            # read at least on message per process if there is some and parse it
            for service, command in self.processes.received():
                self.api.text(self, service, command)
                sleep = 0

            self.asynchronous.run()

            # keep the poller registrations in sync with the API process fds
            if api_fds != self.processes.fds:
                for fd in api_fds:
                    if fd == -1:
                        continue
                    if fd not in self.processes.fds:
                        self._poller.unregister(fd)
                for fd in self.processes.fds:
                    if fd == -1:
                        continue
                    if fd not in api_fds:
                        self._poller.register(
                            fd,
                            select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLNVAL | select.POLLERR)
                api_fds = self.processes.fds

            for io in self._wait_for_io(sleep):
                if io not in api_fds:
                    peers.add(workers[io])

            if self._stopping and not self._peers.keys():
                self._termination('exiting on peer termination', self.Exit.normal)

        except KeyboardInterrupt:
            self._termination('^C received', self.Exit.normal)
        except SystemExit:
            self._termination('exiting', self.Exit.normal)
        # socket.error is a subclass of IOError (so catch it first)
        except socket.error:
            self._termination('socket error received', self.Exit.socket)
        except IOError:
            self._termination('I/O Error received, most likely ^C during IO', self.Exit.io_error)
        except ProcessError:
            self._termination('Problem when sending message(s) to helper program, stopping', self.Exit.process)
        except select.error:
            self._termination('problem using select, stopping', self.Exit.select)

    return self.exit_code
def __init__(self, configurations, text=False):
    """Build the configuration parser and its section dispatch table.

    :param configurations: locations of the configuration(s) to parse
    :param text: when True, treat ``configurations`` as literal configuration
        text rather than file locations — TODO confirm against callers
    """
    _Configuration.__init__(self)
    self.api_encoder = getenv().api.encoder

    self._configurations = configurations
    self._text = text

    self.error = Error()
    self.scope = Scope()

    self.tokeniser = Tokeniser(self.scope, self.error)

    # every section parser shares the same tokeniser/scope/error triplet
    params = (self.tokeniser, self.scope, self.error)
    self.section = Section(*params)
    self.process = ParseProcess(*params)
    self.template = ParseTemplate(*params)
    self.template_neighbor = ParseTemplateNeighbor(*params)
    self.neighbor = ParseNeighbor(*params)
    self.family = ParseFamily(*params)
    self.addpath = ParseAddPath(*params)
    self.nexthop = ParseNextHop(*params)
    self.capability = ParseCapability(*params)
    self.api = ParseAPI(*params)
    self.api_send = ParseSend(*params)
    self.api_receive = ParseReceive(*params)
    self.static = ParseStatic(*params)
    self.static_route = ParseStaticRoute(*params)
    self.announce = SectionAnnounce(*params)
    self.announce_ipv4 = AnnounceIPv4(*params)
    self.announce_ipv6 = AnnounceIPv6(*params)
    self.announce_l2vpn = AnnounceL2VPN(*params)
    self.flow = ParseFlow(*params)
    self.flow_route = ParseFlowRoute(*params)
    self.flow_match = ParseFlowMatch(*params)
    self.flow_then = ParseFlowThen(*params)
    self.flow_scope = ParseFlowScope(*params)
    self.l2vpn = ParseL2VPN(*params)
    self.vpls = ParseVPLS(*params)
    self.operational = ParseOperational(*params)

    # We should check if name are unique when running Section.__init__
    # Maps each section name to its parser class, the commands it accepts,
    # and the sub-sections it may contain.
    self._structure = {
        'root': {
            'class': self.section,
            'commands': [],
            'sections': {
                'process': self.process.name,
                'neighbor': self.neighbor.name,
                'template': self.template.name,
            },
        },
        self.process.name: {
            'class': self.process,
            'commands': self.process.known.keys(),
            'sections': {},
        },
        self.template.name: {
            'class': self.template,
            'commands': self.template.known.keys(),
            'sections': {
                'neighbor': self.template_neighbor.name,
            },
        },
        self.template_neighbor.name: {
            'class': self.template_neighbor,
            'commands': self.template_neighbor.known.keys(),
            'sections': {
                'family': self.family.name,
                'capability': self.capability.name,
                'add-path': self.addpath.name,
                'nexthop': self.nexthop.name,
                'api': self.api.name,
                'static': self.static.name,
                'flow': self.flow.name,
                'l2vpn': self.l2vpn.name,
                'operational': self.operational.name,
                'announce': self.announce.name,
            },
        },
        self.neighbor.name: {
            'class': self.neighbor,
            'commands': self.neighbor.known.keys(),
            'sections': {
                'family': self.family.name,
                'capability': self.capability.name,
                'add-path': self.addpath.name,
                'nexthop': self.nexthop.name,
                'api': self.api.name,
                'static': self.static.name,
                'flow': self.flow.name,
                'l2vpn': self.l2vpn.name,
                'operational': self.operational.name,
                'announce': self.announce.name,
            },
        },
        self.family.name: {
            'class': self.family,
            'commands': self.family.known.keys(),
            'sections': {},
        },
        self.capability.name: {
            'class': self.capability,
            'commands': self.capability.known.keys(),
            'sections': {},
        },
        self.nexthop.name: {
            'class': self.nexthop,
            'commands': self.nexthop.known.keys(),
            'sections': {},
        },
        self.addpath.name: {
            'class': self.addpath,
            'commands': self.addpath.known.keys(),
            'sections': {},
        },
        self.api.name: {
            'class': self.api,
            'commands': self.api.known.keys(),
            'sections': {
                'send': self.api_send.name,
                'receive': self.api_receive.name,
            },
        },
        self.api_send.name: {
            'class': self.api_send,
            'commands': self.api_send.known.keys(),
            'sections': {},
        },
        self.api_receive.name: {
            'class': self.api_receive,
            'commands': self.api_receive.known.keys(),
            'sections': {},
        },
        self.announce.name: {
            'class': self.announce,
            'commands': self.announce.known.keys(),
            'sections': {
                'ipv4': self.announce_ipv4.name,
                'ipv6': self.announce_ipv6.name,
                'l2vpn': self.announce_l2vpn.name,
            },
        },
        self.announce_ipv4.name: {
            'class': self.announce_ipv4,
            'commands': [
                'unicast',
                'multicast',
                'nlri-mpls',
                'mpls-vpn',
                'flow',
                'flow-vpn',
            ],
            'sections': {},
        },
        self.announce_ipv6.name: {
            'class': self.announce_ipv6,
            'commands': [
                'unicast',
                'multicast',
                'nlri-mpls',
                'mpls-vpn',
                'flow',
                'flow-vpn',
            ],
            'sections': {},
        },
        self.announce_l2vpn.name: {
            'class': self.announce_l2vpn,
            'commands': [
                'vpls',
            ],
            'sections': {},
        },
        self.static.name: {
            'class': self.static,
            'commands': ['route', 'attributes'],
            'sections': {
                'route': self.static_route.name,
            },
        },
        self.static_route.name: {
            'class': self.static_route,
            'commands': self.static_route.known.keys(),
            'sections': {},
        },
        self.flow.name: {
            'class': self.flow,
            'commands': self.flow.known.keys(),
            'sections': {
                'route': self.flow_route.name,
            },
        },
        self.flow_route.name: {
            'class': self.flow_route,
            'commands': self.flow_route.known.keys(),
            'sections': {
                'match': self.flow_match.name,
                'then': self.flow_then.name,
                'scope': self.flow_scope.name,
            },
        },
        self.flow_match.name: {
            'class': self.flow_match,
            'commands': self.flow_match.known.keys(),
            'sections': {},
        },
        self.flow_then.name: {
            'class': self.flow_then,
            'commands': self.flow_then.known.keys(),
            'sections': {},
        },
        self.flow_scope.name: {
            'class': self.flow_scope,
            'commands': self.flow_scope.known.keys(),
            'sections': {},
        },
        self.l2vpn.name: {
            'class': self.l2vpn,
            'commands': self.l2vpn.known.keys(),
            'sections': {
                'vpls': self.vpls.name,
            },
        },
        self.vpls.name: {
            'class': self.vpls,
            # fix: was self.l2vpn.known.keys() — a copy/paste defect; the vpls
            # section must expose its own parser's commands
            'commands': self.vpls.known.keys(),
            'sections': {},
        },
        self.operational.name: {
            'class': self.operational,
            'commands': self.operational.known.keys(),
            'sections': {},
        },
    }

    self._neighbors = {}
    self._previous_neighbors = {}
def __init__(self, version): self.version = version self.time = nop self.compact = getenv().api.compact
def run(comment, configurations, validate, pid=0):
    """Start the reactor, optionally under a profiler, and exit the process.

    :param comment: advisory message to log at startup ('' to skip)
    :param configurations: configuration locations handed to the Reactor
    :param validate: forwarded to ``Reactor.run`` (configuration check mode)
    :param pid: when non-zero, suffix the profile output file with this pid
        (used when several configurations are forked)

    This function does not return: every path ends in ``__exit``.
    """
    env = getenv()

    log.notice('Thank you for using ExaBGP', 'welcome')
    log.notice('%s' % version, 'version')
    log.notice('%s' % sys.version.replace('\n', ' '), 'interpreter')
    log.notice('%s' % ' '.join(platform.uname()[:5]), 'os')
    log.notice('%s' % ROOT, 'installation')

    if comment:
        log.notice(comment, 'advice')

    warning = warn()
    if warning:
        log.warning(warning, 'advice')

    if env.api.cli:
        pipename = 'exabgp' if env.api.pipename is None else env.api.pipename
        pipes = named_pipe(ROOT, pipename)
        if len(pipes) != 1:
            # no (or ambiguous) named pipes: disable the cli and tell the user how to fix it
            env.api.cli = False
            log.error(
                'could not find the named pipes (%s.in and %s.out) required for the cli' % (pipename, pipename), 'cli'
            )
            log.error('we scanned the following folders (the number is your PID):', 'cli')
            for location in pipes:
                log.error(' - %s' % location, 'cli control')
            log.error('please make them in one of the folder with the following commands:', 'cli control')
            log.error('> mkfifo %s/run/%s.{in,out}' % (os.getcwd(), pipename), 'cli control')
            log.error('> chmod 600 %s/run/%s.{in,out}' % (os.getcwd(), pipename), 'cli control')
            if os.getuid() != 0:
                log.error(
                    '> chown %d:%d %s/run/%s.{in,out}' % (os.getuid(), os.getgid(), os.getcwd(), pipename),
                    'cli control',
                )
        else:
            pipe = pipes[0]
            os.environ['exabgp_cli_pipe'] = pipe
            os.environ['exabgp_api_pipename'] = pipename

            log.info('named pipes for the cli are:', 'cli control')
            log.info('to send commands %s%s.in' % (pipe, pipename), 'cli control')
            log.info('to read responses %s%s.out' % (pipe, pipename), 'cli control')

    # fast path: no profiling requested
    if not env.profile.enable:
        exit_code = Reactor(configurations).run(validate, ROOT)
        __exit(env.debug.memory, exit_code)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if env.profile.file == 'stdout':
        profiled = 'Reactor(%s).run(%s,"%s")' % (str(configurations), str(validate), str(ROOT))
        # NOTE(review): profile.run() returns None, so this exits with a
        # None exit code — confirm whether __exit tolerates/intends that
        exit_code = profile.run(profiled)
        __exit(env.debug.memory, exit_code)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is not a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        cwd = os.getcwd()
        log.debug('profiling ....', 'reactor')
        profiler = profile.Profile()
        profiler.enable()
        try:
            exit_code = Reactor(configurations).run(validate, ROOT)
        except Exception:
            exit_code = Reactor.Exit.unknown
            raise
        finally:
            from exabgp.vendoring import lsprofcalltree

            profiler.disable()
            kprofile = lsprofcalltree.KCacheGrind(profiler)
            try:
                destination = profile_name if profile_name.startswith('/') else os.path.join(cwd, profile_name)
                with open(destination, 'w+') as write:
                    kprofile.output(write)
            except IOError:
                # fix: message read 'in formation' instead of 'information'
                notice = 'could not save profiling information at: ' + destination
                log.debug("-" * len(notice), 'reactor')
                log.debug(notice, 'reactor')
                log.debug("-" * len(notice), 'reactor')
        __exit(env.debug.memory, exit_code)
    else:
        # profile output location unusable: report it and run unprofiled,
        # signalling failure through the exit code
        log.debug("-" * len(notice), 'reactor')
        log.debug(notice, 'reactor')
        log.debug("-" * len(notice), 'reactor')
        Reactor(configurations).run(validate, ROOT)
        __exit(env.debug.memory, 1)
from exabgp.bgp.message import Open from exabgp.bgp.message.open import Version from exabgp.bgp.message.open import ASN from exabgp.bgp.message.open import RouterID from exabgp.bgp.message.open import HoldTime from exabgp.bgp.message.open.capability import Capabilities from exabgp.bgp.message.open.capability import Capability from exabgp.bgp.message.open.capability import Negotiated from exabgp.bgp.message.update.nlri import NLRI from exabgp.bgp.message.direction import Direction from exabgp.logger import log from exabgp.environment import getenv log.init(getenv()) bodies = [] # fmt: off body = [ 0x0, 0x0, # len withdrawn routes # No routes to remove # Attributes 0x0, 0x30, # len attributes (48) 0x40, # Flag Transitive 0x1, # Code : Attribute ID Origin 0x1, # len 0x0, # Origin : IGP
def cmdline(cmdarg): if not os.path.isfile(ENVFILE): comment = 'environment file missing\ngenerate it using "exabgp env --fi > %s"' % ENVFILE else: comment = '' env = getenv() # Must be done before setting the logger as it modify its behaviour if cmdarg.debug: env.log.all = True env.log.level = syslog.LOG_DEBUG log.init() if cmdarg.profile: env.profile.enable = True env.profile.file = cmdarg.profile if cmdarg.once: env.tcp.once = True if cmdarg.pdb: env.debug.pdb = True if cmdarg.test: env.log.parser = True if cmdarg.memory: env.debug.memory = True if env.cache.attributes: Attribute.caching = env.cache.attributes configurations = [] for configuration in cmdarg.configuration: location = getconf(configuration) if not location: log.critical(f'{configuration} is not an exabgp config file', 'configuration') sys.exit(1) configurations.append(configuration) delay = cmdarg.signal _delayed_signal(delay, signal.SIGUSR1) if env.debug.rotate or len(configurations) == 1: run(comment, configurations, cmdarg.validate) if not (env.log.destination in ('syslog', 'stdout', 'stderr') or env.log.destination.startswith('host:')): log.error('can not log to files when running multiple configuration (as we fork)', 'configuration') sys.exit(1) try: # run each configuration in its own process pids = [] for configuration in configurations: pid = os.fork() if pid == 0: run(comment, [configuration], cmdarg.validate, os.getpid()) else: pids.append(pid) # If we get a ^C / SIGTERM, ignore just continue waiting for our child process signal.signal(signal.SIGINT, signal.SIG_IGN) # wait for the forked processes for pid in pids: os.waitpid(pid, 0) except OSError as exc: log.critical('can not fork, errno %d : %s' % (exc.errno, exc.strerror), 'reactor') sys.exit(1)