def __init__ (self, neighbor, reactor):
	"""Hold the state of the BGP session with one neighbor.

	:param neighbor: the neighbor definition for this peering session
	:param reactor: the reactor driving the event loop
	"""
	try:
		self.logger = Logger()
		# We only try to connect via TCP once
		self.once = environment.settings().tcp.once
		self.bind = True if environment.settings().tcp.bind else False
	except RuntimeError:
		# environment not initialised (e.g. when unit testing):
		# fall back to safe defaults rather than leaving attributes unset
		self.logger = FakeLogger()
		self.once = True
		# BUGFIX: self.bind was never assigned on this path, causing an
		# AttributeError as soon as the peer tried to use it
		self.bind = True

	self.reactor = reactor
	self.neighbor = neighbor
	# The next restart neighbor definition
	self._neighbor = None

	# The peer should restart after a stop
	self._restart = True
	# The peer was restarted (to know what kind of open to send for graceful restart)
	self._restarted = FORCE_GRACEFUL
	self._reset_skip()

	# We want to remove routes which are not in the configuration anymore after a signal to reload
	self._reconfigure = True
	# We want to send all the known routes
	self._resend_routes = SEND.DONE
	# We have new routes for the peers
	self._have_routes = True

	# We have been asked to teardown the session with this code
	self._teardown = None

	# This skipping business should have its own class, like KA
	self._skip_time = None
	self._next_skip = 0

	self.recv_timer = None

	# per-direction ('in' / 'out') connection state
	self._ = {'in':{},'out':{}}

	self._['in']['state'] = STATE.IDLE
	self._['out']['state'] = STATE.IDLE

	# value to reset 'generator' to
	self._['in']['enabled'] = False
	self._['out']['enabled'] = None if not self.neighbor.passive else False

	# the networking code
	self._['out']['proto'] = None
	self._['in']['proto'] = None

	# the networking code
	self._['out']['code'] = self._connect
	self._['in']['code'] = self._accept

	# the generator used by the main code
	# * False, the generator for this direction is down
	# * Generator, the code to run to connect or accept the connection
	# * None, the generator must be re-created
	self._['in']['generator'] = self._['in']['enabled']
	self._['out']['generator'] = self._['out']['enabled']
def __init__ (self,configuration):
	"""Build the reactor: read tuned settings, create the helper objects
	and register the process signal handlers.

	Statement order matters: Daemon(self) receives a partially-built
	reactor, so the settings read above it must already be assigned.
	"""
	self.ip = environment.settings().tcp.bind
	self.port = environment.settings().tcp.port
	self.max_loop_time = environment.settings().reactor.speed
	# NOTE(review): integer division under Python 2 -- presumably intentional
	self.half_loop_time = self.max_loop_time / 2

	self.logger = Logger()
	self.daemon = Daemon(self)
	self.processes = None
	self.listener = None
	self.configuration = Configuration(configuration)

	self._peers = {}

	# run-state flags, flipped by the signal handlers registered below
	self._shutdown = False
	self._reload = False
	self._reload_processes = False
	self._restart = False
	self._route_update = False
	self._saved_pid = False

	self._commands = []
	self._pending = []

	# install this reactor's signal handlers (process-wide side effect)
	signal.signal(signal.SIGTERM, self.sigterm)
	signal.signal(signal.SIGHUP, self.sighup)
	signal.signal(signal.SIGALRM, self.sigalrm)
	signal.signal(signal.SIGUSR1, self.sigusr1)
	signal.signal(signal.SIGUSR2, self.sigusr2)
def debug_check_route (self):
	"""Debug helper: validate a route message against the configuration and exit.

	Does nothing unless debug.route is set; otherwise exits 0 on a valid
	message and 1 on an invalid one (the program is not really running).
	"""
	route = environment.settings().debug.route
	if not route:
		return
	from exabgp.configuration.check import check_message
	sys.exit(0 if check_message(self.neighbors, route) else 1)
def __init__ (self, configurations):
	"""Build the reactor from the command-line configuration files.

	Constructor order matters: Daemon/Listener/API receive self while it
	is only partially built, so the plain attributes are assigned first.
	"""
	self._ips = environment.settings().tcp.bind
	self._port = environment.settings().tcp.port
	self._stopping = environment.settings().tcp.once

	self.exit_code = self.Exit.unknown

	self.max_loop_time = environment.settings().reactor.speed
	self._sleep_time = self.max_loop_time / 100
	# per-cause busy-spin counters (populated elsewhere)
	self._busyspin = {}

	self.early_drop = environment.settings().daemon.drop

	self.processes = None

	self.configuration = Configuration(configurations)
	self.logger = Logger()
	self.asynchronous = ASYNC()
	self.signal = Signal()
	self.daemon = Daemon(self)
	self.listener = Listener(self)
	self.api = API(self)

	self.peers = {}

	self._reload_processes = False
	self._saved_pid = False
def __init__ (self,afi,peer,local):
	"""TCP connection wrapper for one BGP session.

	:param afi: address family of the connection
	:param peer: remote IP address, as a string
	:param local: local IP address, as a string
	"""
	# If the OS tells us we have data on the socket, we should never have to wait more than read_timeout to be able to read it.
	# However real life says that on some OS we do ... So let the user control this value
	try:
		self.read_timeout = environment.settings().tcp.timeout
		self.defensive = environment.settings().debug.defensive
		self.logger = Logger()
	except RuntimeError:
		# environment not initialised (e.g. unit tests): use safe defaults
		self.read_timeout = 1
		self.defensive = True
		self.logger = FakeLogger()

	self.afi = afi
	self.peer = peer
	self.local = local

	self._reading = None
	self._writing = None
	# NOTE(review): the buffer is a str here; confirm bytes are decoded
	# before being buffered if this ever runs under Python 3
	self._buffer = ''
	self.io = None
	self.established = False

	# class-level counter: each new connection gets a unique increasing id
	self.identifier += 1
	self.id = self.identifier
def _reload (self):
	"""Re-parse the current configuration file and install the result.

	Returns True on success, False/error-value on failure; may call
	sys.exit() when running in one of the debug validation modes.
	"""
	# taking the first configuration available (FIFO buffer)
	fname = self._configurations.pop(0)
	self.process.configuration(fname)
	self._configurations.append(fname)

	# clearing the current configuration to be able to re-parse it
	self._clear()

	if not self.tokens.set_file(fname):
		return False

	# parsing the configuration
	r = False
	while not self.tokens.finished:
		r = self._dispatch(
			self._scope,'root','configuration',
			self._tree['configuration'].keys(),
			[]
		)
		if r is False:
			break

	# handling possible parsing errors
	if r not in [True,None]:
		# making sure nothing changed
		self.neighbor.cancel()
		return self.error.set(
			"\n"
			"syntax error in section %s\n"
			"line %d: %s\n"
			"\n%s" % (
				self._location[-1],
				self.tokens.number,
				' '.join(self.tokens.line),
				str(self.error)
			)
		)

	# installing in the neighbor the API routes
	self.neighbor.complete()

	# we are not really running the program, just validating a route message
	if environment.settings().debug.route:
		from exabgp.configuration.current.check import check_message
		if check_message(self.neighbor.neighbors,environment.settings().debug.route):
			sys.exit(0)
		sys.exit(1)

	# we are not really running the program, just want to check the configuration validity
	if environment.settings().debug.selfcheck:
		from exabgp.configuration.current.check import check_neighbor
		if check_neighbor(self.neighbor.neighbors):
			sys.exit(0)
		sys.exit(1)

	return True
def __init__ (self):
	"""Set up the helper-process manager from the API settings."""
	self.logger = Logger()
	self.clean()
	self.silence = False
	self._buffer = {}
	self._configuration = {}

	# read the API settings once instead of repeatedly
	api = environment.settings().api
	self.respawn_number = 5 if api.respawn else 0
	self.terminate_on_error = api.terminate
	self.ack = api.ack
def _reload (self):
	"""Re-parse the configuration, preserving each neighbor's previous
	routes so the caller can compute what to announce/withdraw.

	Returns True on success; may call sys.exit() in debug validation modes.
	"""
	# taking the first configuration available (FIFO buffer)
	fname = self._configurations.pop(0)
	self.process.configuration(fname)
	self._configurations.append(fname)

	# storing the routes associated with each peer so we can find what changed
	backup_changes = {}
	for neighbor in self._neighbors:
		backup_changes[neighbor] = self._neighbors[neighbor].changes

	# clearing the current configuration to be able to re-parse it
	self._clear()

	if not self.tokens.set_file(fname):
		return False

	# parsing the configuration
	r = False
	while not self.tokens.finished:
		r = self._dispatch(
			self._scope,'configuration',
			['group','neighbor'],
			[]
		)
		if r is False:
			break

	# handling possible parsing errors
	if r not in [True,None]:
		return self.error.set("\nsyntax error in section %s\nline %d: %s\n\n%s" % (self._location[-1],self.tokens.number,' '.join(self.tokens.line),self.error))

	# parsing was successful, assigning the result
	self.neighbors = self._neighbors

	# installing in the neighbor what was its previous routes so we can
	# add/withdraw what need to be
	for neighbor in self.neighbors:
		self.neighbors[neighbor].backup_changes = backup_changes.get(neighbor,[])

	# we are not really running the program, just validating a route message
	if environment.settings().debug.route:
		from exabgp.configuration.check import check_message
		if check_message(self.neighbors,environment.settings().debug.route):
			sys.exit(0)
		sys.exit(1)

	# we are not really running the program, just want to check the configuration validity
	if environment.settings().debug.selfcheck:
		from exabgp.configuration.check import check_neighbor
		if check_neighbor(self.neighbors):
			sys.exit(0)
		sys.exit(1)

	return True
def __init__ (self, neighbor, reactor):
	"""Hold the state of the BGP session with one neighbor, with one
	Direction object per connection direction (incoming/outgoing)."""
	try:
		self.logger = Logger()
		# We only try to connect via TCP once
		self.once = environment.settings().tcp.once
		self.bind = True if environment.settings().tcp.bind else False
	except RuntimeError:
		# environment not initialised (e.g. unit tests): safe defaults
		self.logger = FakeLogger()
		self.once = False
		self.bind = True

	self.reactor = reactor
	self.neighbor = neighbor
	# The next restart neighbor definition
	self._neighbor = None

	# The peer should restart after a stop
	self._restart = True
	# The peer was restarted (to know what kind of open to send for graceful restart)
	self._restarted = FORCE_GRACEFUL

	# We want to remove routes which are not in the configuration anymore after a signal to reload
	self._reconfigure = True
	# We want to send all the known routes
	self._resend_routes = SEND.DONE
	# We have new routes for the peers
	self._have_routes = True

	# We have been asked to teardown the session with this code
	self._teardown = None

	self._delay = Delay()
	self.recv_timer = None

	self._incoming = Direction (
		'in',
		self._accept,
		FSM(FSM.IDLE),
		None,
		False,
		False
	)

	self._outgoing = Direction (
		'out',
		self._connect,
		FSM(FSM.IDLE),
		None,
		None if not self.neighbor.passive else False,
		None if not self.neighbor.passive else False
	)

	# each direction keeps a reference to the other for collision handling
	self._incoming.opposite = self._outgoing
	self._outgoing.opposite = self._incoming
def __init__ (self, reactor):
	"""Prepare daemonisation: read the daemon settings and move the
	process to a safe working directory and umask.

	NOTE: os.chdir('/') and os.umask() are process-wide side effects.
	"""
	self.pid = environment.settings().daemon.pid
	self.user = environment.settings().daemon.user
	self.daemonize = environment.settings().daemon.daemonize
	self.umask = environment.settings().daemon.umask

	self.logger = Logger()

	self.reactor = reactor

	# avoid keeping any mounted filesystem busy while daemonised
	os.chdir('/')
	os.umask(self.umask)
def __init__(self, peer):
	"""Protocol state for one peer: negotiation, connection and port.

	The remote port comes from the neighbor's 'connect' setting when
	present, otherwise from the exabgp.tcp.port / exabgp_tcp_port
	environment variables, and finally defaults to 179.
	"""
	try:
		self.logger = Logger()
	except RuntimeError:
		self.logger = FakeLogger()
	self.peer = peer
	self.neighbor = peer.neighbor
	self.negotiated = Negotiated(self.neighbor)
	self.connection = None

	if self.neighbor.connect:
		self.port = self.neighbor.connect
	else:
		self.port = 179
		for variable in ('exabgp.tcp.port', 'exabgp_tcp_port'):
			value = os.environ.get(variable, '')
			if value.isdigit():
				self.port = int(value)
				break

	# XXX: FIXME: check that the -19 is correct (but it is harmless)
	# The message size is the whole BGP message _without_ headers
	self.message_size = Message.MAX_LEN - Message.HEADER_LEN

	from exabgp.configuration.environment import environment
	self.log_routes = environment.settings().log.routes
def __init__ (self):
	"""Singleton logger set-up: read the settings once and compute the
	per-subsystem enable flags."""
	# singleton guard: only the first instantiation performs the setup
	if self._instance.get('class',None) is not None:
		return
	self._instance['class'] = self

	command = environment.settings()
	self.short = command.log.short
	self.level = command.log.level

	log = command.log
	enabled = log.enable

	# map option name -> settings attribute ('wire' and 'timer' differ)
	self._option = {'pdb': command.debug.pdb}
	for option, attribute in (
		('reactor','reactor'),
		('daemon','daemon'),
		('processes','processes'),
		('configuration','configuration'),
		('network','network'),
		('wire','packets'),
		('message','message'),
		('rib','rib'),
		('timer','timers'),
		('routes','routes'),
		('parser','parser'),
	):
		self._option[option] = enabled and (log.all or getattr(log,attribute))

	if not enabled:
		self.destination = ''
		return

	self.destination = log.destination
	self.restart(True)
def callback ():
	# Generator answering an API "show ... routes" request; yields between
	# chunks of api.chunk lines so the reactor stays responsive.
	# Uses names from the enclosing scope: last, reactor, advertised,
	# rib_name, route_type, extensive, service.
	families = None
	lines_per_yield = environment.settings().api.chunk
	if last in ('routes', 'extensive', 'static', 'flow', 'l2vpn'):
		# generic keyword: report on every peer
		peers = list(reactor.peers)
	else:
		# otherwise restrict to the peers whose key matches the request
		peers = [n for n in reactor.peers.keys() if 'neighbor %s' % last in n]
	for key in peers:
		peer = reactor.peers.get(key, None)
		if not peer:
			continue
		if advertised:
			families = peer.proto.negotiated.families if peer.proto else []
		rib = peer.neighbor.rib.outgoing if rib_name == 'out' else peer.neighbor.rib.incoming
		routes = list(rib.cached_changes(families))
		while routes:
			changes, routes = routes[:lines_per_yield], routes[lines_per_yield:]
			for change in changes:
				if isinstance(change.nlri, route_type):
					if extensive:
						# NOTE(review): neighbor.name() may already include a
						# 'neighbor ' prefix (the sibling implementation omits
						# it here) -- confirm the intended answer format
						reactor.processes.answer(service,'neighbor %s %s %s' % (peer.neighbor.name(),'%s %s' % change.nlri.family(),change.extensive()),force=True)
					else:
						reactor.processes.answer(service,'neighbor %s %s %s' % (peer.neighbor.peer_address,'%s %s' % change.nlri.family(),str(change.nlri)),force=True)
			yield True
	reactor.processes.answer_done(service)
def daemonise(self):
	"""Detach from the controlling terminal via a double fork.

	Skipped when daemonisation is disabled, when logs go to
	stdout/stderr (forking would lose them), or when the process is
	already supervised / started by an init-like parent.
	"""
	if not self.daemonize:
		return

	log = environment.settings().log
	if log.enable and log.destination.lower() in ('stdout', 'stderr'):
		self.logger.daemon(
			'ExaBGP can not fork when logs are going to %s' % log.destination.lower(), 'critical')
		return

	def detach():
		# fork and let the parent exit; log on failure
		try:
			if os.fork() > 0:
				os._exit(0)
		except OSError as exc:
			self.logger.reactor(
				'Can not fork, errno %d : %s' % (exc.errno, exc.strerror), 'critical')

	# do not detach if we are already supervised or run by init like process
	if self._is_socket(sys.__stdin__.fileno()) or os.getppid() == 1:
		return

	detach()
	os.setsid()
	detach()
	self.silence()
def debug_self_check(self):
	"""Debug helper: validate the configuration and exit (0 valid, 1 not).

	Does nothing unless debug.selfcheck is set -- we are not really
	running the program, just checking the configuration validity.
	"""
	if not environment.settings().debug.selfcheck:
		return
	from exabgp.configuration.check import check_neighbor
	sys.exit(0 if check_neighbor(self.neighbors) else 1)
def __init__ (self):
	"""Singleton logger set-up using a HashTable of per-subsystem flags."""
	# singleton guard: only the first instantiation performs the setup
	if self._instance.get('class',None) is not None:
		return
	self._instance['class'] = self

	command = environment.settings()
	self.short = command.log.short
	self.level = command.log.level

	log = command.log
	enabled = log.enable

	self._option = HashTable()
	self._option.pdb = command.debug.pdb
	# map option name -> settings attribute ('wire' and 'timer' differ)
	for option, attribute in (
		('reactor','reactor'),
		('daemon','daemon'),
		('processes','processes'),
		('configuration','configuration'),
		('network','network'),
		('wire','packets'),
		('message','message'),
		('rib','rib'),
		('timer','timers'),
		('routes','routes'),
		('parser','parser'),
	):
		setattr(self._option, option, enabled and (log.all or getattr(log,attribute)))

	if not enabled:
		self.destination = ''
		return

	self.destination = log.destination
	self.restart(True)
def __init__ (self):
	"""Read the logging configuration and compute per-subsystem flags."""
	command = environment.settings()
	self.short = command.log.short
	self.level = command.log.level
	self._pdb = command.debug.pdb

	# each subsystem is enabled only when logging is on and either 'all'
	# or its own flag is set
	self._reactor = command.log.enable and (command.log.all or command.log.reactor)
	self._daemon = command.log.enable and (command.log.all or command.log.daemon)
	self._processes = command.log.enable and (command.log.all or command.log.processes)
	self._configuration = command.log.enable and (command.log.all or command.log.configuration)
	self._network = command.log.enable and (command.log.all or command.log.network)
	self._wire = command.log.enable and (command.log.all or command.log.packets)
	self._message = command.log.enable and (command.log.all or command.log.message)
	self._rib = command.log.enable and (command.log.all or command.log.rib)
	self._timer = command.log.enable and (command.log.all or command.log.timers)
	self._routes = command.log.enable and (command.log.all or command.log.routes)
	self._parser = command.log.enable and (command.log.all or command.log.parser)

	if not command.log.enable:
		# BUGFIX: always define self.destination so later reads do not
		# raise AttributeError when logging is disabled (the sibling
		# implementations set it to '' on this path)
		self.destination = ''
		return

	self.destination = command.log.destination
	self.restart(True)
def __init__(self):
	"""Read the logging configuration and compute per-subsystem flags."""
	command = environment.settings()
	self.short = command.log.short
	self.level = command.log.level
	self._pdb = command.debug.pdb

	# each subsystem is enabled only when logging is on and either 'all'
	# or its own flag is set
	self._reactor = command.log.enable and (command.log.all or command.log.reactor)
	self._daemon = command.log.enable and (command.log.all or command.log.daemon)
	self._processes = command.log.enable and (command.log.all or command.log.processes)
	self._configuration = command.log.enable and (
		command.log.all or command.log.configuration)
	self._network = command.log.enable and (command.log.all or command.log.network)
	self._wire = command.log.enable and (command.log.all or command.log.packets)
	self._message = command.log.enable and (command.log.all or command.log.message)
	self._rib = command.log.enable and (command.log.all or command.log.rib)
	self._timer = command.log.enable and (command.log.all or command.log.timers)
	self._routes = command.log.enable and (command.log.all or command.log.routes)
	self._parser = command.log.enable and (command.log.all or command.log.parser)

	if not command.log.enable:
		# BUGFIX: always define self.destination so later reads do not
		# raise AttributeError when logging is disabled
		self.destination = ''
		return

	self.destination = command.log.destination
	self.restart(True)
def __init__(self, location=None, production=True):
	"""Control-socket holder; defaults the socket path from the settings.

	:param location: socket path; falls back to api.socket when falsy
	:param production: whether this instance runs in production mode
	"""
	from exabgp.configuration.environment import environment
	if not location:
		# only consult the settings when no explicit location was given
		location = environment.settings().api.socket
	self.location = location
	self.unix = None
	self.sock = None
	self._production = production
def __init__ (self):
	"""Singleton logger set-up using a HashTable of per-subsystem flags."""
	# singleton guard: only the first instantiation performs the setup
	if self._instance.get('class',None) is not None:
		return
	self._instance['class'] = self

	command = environment.settings()
	self.short = command.log.short
	self.level = command.log.level

	self._option = HashTable()
	self._option.pdb = command.debug.pdb
	# each subsystem is enabled only when logging is on and either 'all'
	# or its own flag is set ('wire' maps to packets, 'timer' to timers)
	self._option.reactor = command.log.enable and (command.log.all or command.log.reactor)
	self._option.daemon = command.log.enable and (command.log.all or command.log.daemon)
	self._option.processes = command.log.enable and (command.log.all or command.log.processes)
	self._option.configuration = command.log.enable and (command.log.all or command.log.configuration)
	self._option.network = command.log.enable and (command.log.all or command.log.network)
	self._option.wire = command.log.enable and (command.log.all or command.log.packets)
	self._option.message = command.log.enable and (command.log.all or command.log.message)
	self._option.rib = command.log.enable and (command.log.all or command.log.rib)
	self._option.timer = command.log.enable and (command.log.all or command.log.timers)
	self._option.routes = command.log.enable and (command.log.all or command.log.routes)
	self._option.parser = command.log.enable and (command.log.all or command.log.parser)

	if not command.log.enable:
		# BUGFIX: always define self.destination so later reads do not
		# raise AttributeError when logging is disabled (the sibling
		# implementation sets it to '' on this path)
		self.destination = ''
		return

	self.destination = command.log.destination
	self.restart(True)
def callback ():
	# Generator answering an API "show ... routes" request; yields between
	# chunks of api.chunk lines so the reactor stays responsive.
	# Uses names from the enclosing scope: last, reactor, advertised,
	# rib_name, route_type, extensive, service.
	families = None
	lines_per_yield = environment.settings().api.chunk
	if last in ('routes', 'extensive', 'static', 'flow', 'l2vpn'):
		# generic keyword: report on every peer
		peers = list(reactor.peers)
	else:
		# otherwise restrict to the peers whose key matches the request
		peers = [n for n in reactor.peers.keys() if 'neighbor %s' % last in n]
	for key in peers:
		peer = reactor.peers.get(key, None)
		if not peer:
			continue
		if advertised:
			families = peer.proto.negotiated.families if peer.proto else []
		rib = peer.neighbor.rib.outgoing if rib_name == 'out' else peer.neighbor.rib.incoming
		routes = list(rib.cached_changes(families))
		while routes:
			changes, routes = routes[:lines_per_yield], routes[lines_per_yield:]
			for change in changes:
				if isinstance(change.nlri, route_type):
					if extensive:
						# NOTE(review): unlike the non-extensive branch this
						# answer has no explicit 'neighbor ' prefix -- probably
						# because neighbor.name() already provides it; confirm
						reactor.processes.answer(service,'%s %s %s' % (peer.neighbor.name(),'%s %s' % change.nlri.family(),change.extensive()),force=True)
					else:
						reactor.processes.answer(service,'neighbor %s %s %s' % (peer.neighbor.peer_address,'%s %s' % change.nlri.family(),str(change.nlri)),force=True)
			yield True
	reactor.processes.answer_done(service)
def __init__ (self, peer):
	"""Protocol state for one peer: negotiation, connection and port.

	The remote port comes from the neighbor's 'connect' setting when
	present, otherwise from the exabgp.tcp.port / exabgp_tcp_port
	environment variables, and finally defaults to 179.
	"""
	try:
		self.logger = Logger()
	except RuntimeError:
		self.logger = FakeLogger()
	self.peer = peer
	self.neighbor = peer.neighbor
	self.negotiated = Negotiated(self.neighbor)
	self.connection = None

	port = self.neighbor.connect
	if not port:
		dotted = os.environ.get('exabgp.tcp.port','')
		underscored = os.environ.get('exabgp_tcp_port','')
		if dotted.isdigit():
			port = int(dotted)
		elif underscored.isdigit():
			port = int(underscored)
		else:
			port = 179
	self.port = port

	# XXX: FIXME: check that the -19 is correct (but it is harmless)
	# The message size is the whole BGP message _without_ headers
	self.message_size = Message.MAX_LEN-Message.HEADER_LEN

	from exabgp.configuration.environment import environment
	self.log_routes = environment.settings().log.routes
def debug_self_check (self):
	# we are not really running the program, just checking the configuration validity
	if environment.settings().debug.selfcheck:
		from exabgp.configuration.check import check_neighbor
		valid = check_neighbor(self.neighbors)
		sys.exit(0 if valid else 1)
def __init__ (self, location=None,production=True):
	"""Control-socket holder; defaults the socket path from the settings."""
	from exabgp.configuration.environment import environment
	# fall back to the configured api.socket only when no location is given
	self.location = location if location else environment.settings().api.socket
	self.unix = None
	self.sock = None
	self._production = production
def daemonise (self):
	"""Detach from the controlling terminal using the classic double fork.

	Refuses to fork when logs are written to stdout/stderr, and does not
	detach when already supervised or started by an init-like parent.
	"""
	if not self.daemonize:
		return

	log = environment.settings().log
	destination = log.destination.lower()
	if log.enable and destination in ('stdout','stderr'):
		self.logger.daemon('ExaBGP can not fork when logs are going to %s' % destination,'critical')
		return

	# do not detach if we are already supervised or run by init like process
	if self._is_socket(sys.__stdin__.fileno()) or os.getppid() == 1:
		return

	def fork_or_log ():
		try:
			pid = os.fork()
		except OSError as exc:
			self.logger.reactor('Can not fork, errno %d : %s' % (exc.errno,exc.strerror),'critical')
			return
		# the parent exits, the child carries on
		if pid > 0:
			os._exit(0)

	fork_or_log()
	os.setsid()
	fork_or_log()
	self.silence()
def __init__ (self,reactor):
	"""Process-communication helper bound to the reactor."""
	from exabgp.configuration.environment import environment

	self.logger = Logger()
	self.reactor = reactor
	self.clean()
	self.silence = False
	# whether API timestamps use high-resolution time
	self.highres = environment.settings().api.highres
def __init__ (self, reactor):
	"""Process-communication helper bound to the reactor."""
	self.logger = Logger()
	self.reactor = reactor
	# clean() resets the internal run state before the flags below are set
	self.clean()
	self.silence = False
	from exabgp.configuration.environment import environment
	# whether API timestamps use high-resolution time
	self.highres = environment.settings().api.highres
def _accept (self): self._incoming.fsm.change(FSM.CONNECT) # we can do this as Protocol is a mutable object proto = self._incoming.proto # send OPEN message = Message.CODE.NOP for message in proto.new_open(self._restarted): if ord(message.TYPE) == Message.CODE.NOP: yield ACTION.NOW proto.negotiated.sent(message) self._incoming.fsm.change(FSM.OPENSENT) # Read OPEN wait = environment.settings().bgp.openwait opentimer = ReceiveTimer(self.me,wait,1,1,'waited for open too long, we do not like stuck in active') # Only yield if we have not the open, otherwise the reactor can run the other connection # which would be bad as we need to do the collission check without going to the other peer for message in proto.read_open(self.neighbor.peer_address.top()): opentimer.check_ka(message) if ord(message.TYPE) == Message.CODE.NOP: yield ACTION.LATER self._incoming.fsm.change(FSM.OPENCONFIRM) proto.negotiated.received(message) proto.validate_open() if self._outgoing.fsm == FSM.OPENCONFIRM: self.logger.network('incoming connection finds the outgoing connection is in openconfirm') local_id = self.neighbor.router_id.pack() remote_id = proto.negotiated.received_open.router_id.pack() if local_id < remote_id: self.logger.network('closing the outgoing connection') self._stop(self._outgoing,'collision local id < remote id') yield ACTION.LATER else: self.logger.network('aborting the incoming connection') raise Interrupted(self._incoming) # Send KEEPALIVE for message in self._incoming.proto.new_keepalive('OPENCONFIRM'): yield ACTION.NOW # Start keeping keepalive timer self.recv_timer = ReceiveTimer(self.me,proto.negotiated.holdtime,4,0) # Read KEEPALIVE for message in proto.read_keepalive(): self.recv_timer.check_ka(message) yield ACTION.NOW self._incoming.fsm.change(FSM.ESTABLISHED) # let the caller know that we were sucesfull yield ACTION.NOW
def __init__(self, error, logger):
	"""Neighbor-section parser state.

	:param error: shared error reporter
	:param logger: shared logger
	"""
	self.error = error
	self.logger = logger
	self.capability = ParseCapability(error)
	# path of the API fifo, from the settings
	self.fifo = environment.settings().api.file

	# parsed neighbors (public) and the in-progress / previous sets
	self.neighbors = {}
	self._neighbors = {}
	self._previous = {}
def __init__ (self, error, logger):
	"""Neighbor-section parser state.

	:param error: shared error reporter
	:param logger: shared logger
	"""
	self.error = error
	self.logger = logger
	self.capability = ParseCapability(error)
	# path of the API fifo, from the settings
	self.fifo = environment.settings().api.file

	# parsed neighbors (public) and the in-progress / previous sets
	self.neighbors = {}
	self._neighbors = {}
	self._previous = {}
def reload(self):
	"""Re-parse the configuration, mapping known failures to an error value."""
	try:
		return self._reload()
	except KeyboardInterrupt:
		return self.error.set(
			'configuration reload aborted by ^C or SIGINT')
	except (Error, StandardError) as exc:
		# parser errors and unexpected ones are reported identically,
		# unless configuration debugging is on, in which case they propagate
		if environment.settings().debug.configuration:
			raise
		return self.error.set(
			'problem parsing configuration file line %d\n'
			'error message: %s' % (self.tokeniser.index_line, exc))
def __init__(self, neighbor, reactor):
	"""Hold the state of the BGP session with one neighbor, with one
	Direction object per connection direction (incoming/outgoing)."""
	try:
		self.logger = Logger()
		# We only try to connect via TCP once
		self.once = environment.settings().tcp.once
		self.bind = True if environment.settings().tcp.bind else False
	except RuntimeError:
		# environment not initialised (e.g. unit tests): safe defaults
		self.logger = FakeLogger()
		self.once = False
		self.bind = True

	self.reactor = reactor
	self.neighbor = neighbor
	# The next restart neighbor definition
	self._neighbor = None

	# The peer should restart after a stop
	self._restart = True
	# The peer was restarted (to know what kind of open to send for graceful restart)
	self._restarted = FORCE_GRACEFUL

	# We want to remove routes which are not in the configuration anymore after a signal to reload
	self._reconfigure = True
	# We want to send all the known routes
	self._resend_routes = SEND.DONE
	# We have new routes for the peers
	self._have_routes = True

	# We have been asked to teardown the session with this code
	self._teardown = None

	self._delay = Delay()
	self.recv_timer = None

	self._incoming = Direction('in', self._accept, FSM(FSM.IDLE), None, False, False)

	self._outgoing = Direction(
		'out', self._connect, FSM(FSM.IDLE), None,
		None if not self.neighbor.passive else False,
		None if not self.neighbor.passive else False)

	# each direction keeps a reference to the other for collision handling
	self._incoming.opposite = self._outgoing
	self._outgoing.opposite = self._incoming
def __init__ (self, neighbor, reactor):
	"""Hold the state of the BGP session with one neighbor (single-FSM
	version with per-peer statistics)."""
	try:
		self.logger = Logger()
		# We only try to connect via TCP once
		self.once = environment.settings().tcp.once
		self.bind = True if environment.settings().tcp.bind else False
	except RuntimeError:
		# environment not initialised (e.g. unit tests): safe defaults
		self.logger = FakeLogger()
		self.once = False
		self.bind = True

	now = time.time()

	self.reactor = reactor
	self.neighbor = neighbor
	# The next restart neighbor definition
	self._neighbor = None

	self.proto = None
	self.fsm = FSM(self,FSM.IDLE)
	# session statistics; 'creation' and 'complete' both start at "now"
	self.stats = {
		'fsm': self.fsm,
		'creation': now,
		'complete': now,
	}
	self.generator = None

	# The peer should restart after a stop
	self._restart = True
	# The peer was restarted (to know what kind of open to send for graceful restart)
	self._restarted = FORCE_GRACEFUL

	# We want to remove routes which are not in the configuration anymore after a signal to reload
	self._reconfigure = True
	# We want to send all the known routes
	self._resend_routes = SEND.DONE
	# We have new routes for the peers
	self._have_routes = True

	# We have been asked to teardown the session with this code
	self._teardown = None

	self._delay = Delay()
	self.recv_timer = None
def _accept (self):
	# Generator driving an inbound connection through OPEN/KEEPALIVE
	# exchange up to ESTABLISHED; direction state lives in self._['in'].

	# we can do this as Protocol is a mutable object
	proto = self._['in']['proto']

	# send OPEN
	for message in proto.new_open(self._restarted):
		if ord(message.TYPE) == Message.Type.NOP:
			yield ACTION.immediate

	proto.negotiated.sent(message)

	self._['in']['state'] = STATE.opensent

	# Read OPEN
	wait = environment.settings().bgp.openwait
	opentimer = Timer(self._log('in'),wait,1,1,'waited for open too long, we do not like stuck in active')

	# Only yield if we have not the open, otherwise the reactor can run the other connection
	# which would be bad as we need to do the collision check without going to the other peer
	for message in proto.read_open(self.neighbor.peer_address.ip):
		opentimer.tick(message)
		if ord(message.TYPE) == Message.Type.NOP:
			yield ACTION.later

	self._['in']['state'] = STATE.openconfirm
	proto.negotiated.received(message)
	proto.validate_open()

	if self._['out']['state'] == STATE.openconfirm:
		# connection collision: the side with the lower router-id
		# closes its outgoing connection
		self.logger.network('incoming connection finds the outgoing connection is in openconfirm')
		local_id = self.neighbor.router_id.packed
		remote_id = proto.negotiated.received_open.router_id.packed

		if local_id < remote_id:
			self.logger.network('closing the outgoing connection')
			self._stop('out','collision local id < remote id')
			yield ACTION.later
		else:
			self.logger.network('aborting the incoming connection')
			stop = Interrupted()
			stop.direction = 'in'
			raise stop

	# Send KEEPALIVE
	for message in self._['in']['proto'].new_keepalive('OPENCONFIRM'):
		yield ACTION.immediate

	# Start the keepalive timer
	self.timer = Timer(self._log('in'),proto.negotiated.holdtime,4,0)
	# Read KEEPALIVE
	for message in proto.read_keepalive('ESTABLISHED'):
		self.timer.tick(message)
		yield ACTION.later

	self._['in']['state'] = STATE.established

	# let the caller know that we were successful
	yield ACTION.immediate
def _accept (self):
	# Generator driving an inbound connection through OPEN/KEEPALIVE
	# exchange up to ESTABLISHED; direction state lives in self._['in'].

	# we can do this as Protocol is a mutable object
	proto = self._['in']['proto']

	# send OPEN
	for message in proto.new_open(self._restarted):
		if ord(message.TYPE) == Message.ID.NOP:
			yield ACTION.immediate

	proto.negotiated.sent(message)

	self._['in']['state'] = STATE.opensent

	# Read OPEN
	wait = environment.settings().bgp.openwait
	opentimer = ReceiveTimer(self.me,wait,1,1,'waited for open too long, we do not like stuck in active')

	# Only yield if we have not the open, otherwise the reactor can run the other connection
	# which would be bad as we need to do the collision check without going to the other peer
	for message in proto.read_open(self.neighbor.peer_address.ip):
		opentimer.check_ka(message)
		if ord(message.TYPE) == Message.ID.NOP:
			yield ACTION.later

	self._['in']['state'] = STATE.openconfirm
	proto.negotiated.received(message)
	proto.validate_open()

	if self._['out']['state'] == STATE.openconfirm:
		# connection collision: the side with the lower router-id
		# closes its outgoing connection
		self.logger.network('incoming connection finds the outgoing connection is in openconfirm')
		local_id = self.neighbor.router_id.packed
		remote_id = proto.negotiated.received_open.router_id.packed

		if local_id < remote_id:
			self.logger.network('closing the outgoing connection')
			self._stop('out','collision local id < remote id')
			yield ACTION.later
		else:
			self.logger.network('aborting the incoming connection')
			stop = Interrupted()
			stop.direction = 'in'
			raise stop

	# Send KEEPALIVE
	for message in self._['in']['proto'].new_keepalive('OPENCONFIRM'):
		yield ACTION.immediate

	# Start the keepalive timer
	self.recv_timer = ReceiveTimer(self.me,proto.negotiated.holdtime,4,0)
	# Read KEEPALIVE
	for message in proto.read_keepalive():
		self.recv_timer.check_ka(message)
		yield ACTION.immediate

	self._['in']['state'] = STATE.established

	# let the caller know that we were successful
	yield ACTION.immediate
def __init__(self):
	"""Attribute collection: reset the cached representations."""
	# cached representation of the object
	self._str = ''
	self._idx = ''
	self._json = ''

	# The parsed attributes have no mp routes and/or those are last
	self.cacheable = True

	# XXX: FIXME: surely not the best place for this
	# class-wide switch, re-set on every instantiation from the settings
	Attribute.caching = environment.settings().cache.attributes
def __init__ (self, neighbor, reactor):
	"""Hold the state of the BGP session with one neighbor (single-FSM
	version with per-peer statistics)."""
	try:
		self.logger = Logger()
		# We only try to connect via TCP once
		self.once = environment.settings().tcp.once
		self.bind = True if environment.settings().tcp.bind else False
	except RuntimeError:
		# environment not initialised (e.g. unit tests): safe defaults
		self.logger = FakeLogger()
		self.once = False
		self.bind = True

	now = time.time()

	self.reactor = reactor
	self.neighbor = neighbor
	# The next restart neighbor definition
	self._neighbor = None

	self.proto = None
	self.fsm = FSM(self,FSM.IDLE)
	# session statistics; 'complete' stays 0 until the session establishes
	self.stats = {
		'fsm': self.fsm,
		'creation': now,
		'reset': now,
		'complete': 0,
	}
	self.generator = None

	# The peer should restart after a stop
	self._restart = True
	# The peer was restarted (to know what kind of open to send for graceful restart)
	self._restarted = FORCE_GRACEFUL

	# We want to remove routes which are not in the configuration anymore after a signal to reload
	self._reconfigure = True
	# We want to send all the known routes
	self._resend_routes = SEND.DONE

	# We have been asked to teardown the session with this code
	self._teardown = None

	self._delay = Delay()
	self.recv_timer = None
def reload (self):
	"""Re-parse the configuration and report any failure via self.error."""
	try:
		return self._reload()
	except KeyboardInterrupt:
		return self.error.set('configuration reload aborted by ^C or SIGINT')
	except (Error, StandardError) as exc:
		# identical handling for parser and unexpected errors; propagate
		# instead when configuration debugging is enabled
		if environment.settings().debug.configuration:
			raise
		message = 'problem parsing configuration file line %d\nerror message: %s' % (
			self.tokeniser.index_line, exc
		)
		return self.error.set(message)
def __init__ (self):
    """Initialise the cache slots for this object's rendered forms."""
    # lazily-populated caches of the string, index and json renderings
    self._str, self._idx, self._json = '', '', ''
    # The parsed attributes have no mp routes and/or those are last
    self.cacheable = True
    # XXX: FIXME: surely not the best place for this global toggle
    Attribute.caching = environment.settings().cache.attributes
def __init__ (self, configurations):
    """Build the reactor: daemon, processes, listener, API and peer state."""
    settings = environment.settings()
    self.ip = settings.tcp.bind
    self.port = settings.tcp.port
    self.respawn = settings.api.respawn
    self.max_loop_time = settings.reactor.speed
    self.early_drop = settings.daemon.drop

    self.logger = Logger()
    self.daemon = Daemon(self)
    self.processes = None
    self.listener = None
    self.configuration = Configuration(configurations)
    self.api = API(self)

    # per-neighbor Peer objects
    self.peers = {}
    self.route_update = False

    self._stopping = settings.tcp.once
    self._shutdown = False
    self._reload = False
    self._reload_processes = False
    self._restart = False
    self._saved_pid = False
    self._pending = deque()
    self._running = None

    # react to the usual control signals
    for signum, handler in (
        (signal.SIGTERM, self.sigterm),
        (signal.SIGHUP, self.sighup),
        (signal.SIGALRM, self.sigalrm),
        (signal.SIGUSR1, self.sigusr1),
        (signal.SIGUSR2, self.sigusr2),
    ):
        signal.signal(signum, handler)
def __init__(self, configurations):
    """Create the reactor and register its signal handlers."""
    env = environment.settings()

    # networking and scheduling knobs
    self.ip = env.tcp.bind
    self.port = env.tcp.port
    self.respawn = env.api.respawn
    self.max_loop_time = env.reactor.speed
    self.early_drop = env.daemon.drop

    self.logger = Logger()
    self.daemon = Daemon(self)
    self.processes = None
    self.listener = None
    self.configuration = Configuration(configurations)
    self.api = API(self)
    self.peers = {}
    self.route_update = False

    # internal state flags
    self._stopping = env.tcp.once
    self._shutdown = False
    self._reload = False
    self._reload_processes = False
    self._restart = False
    self._saved_pid = False
    self._pending = deque()
    self._running = None

    signal.signal(signal.SIGTERM, self.sigterm)
    signal.signal(signal.SIGHUP, self.sighup)
    signal.signal(signal.SIGALRM, self.sigalrm)
    signal.signal(signal.SIGUSR1, self.sigusr1)
    signal.signal(signal.SIGUSR2, self.sigusr2)
def __init__(self):
    """Set up the representation caches and attribute-caching state."""
    # cached renderings of the object, empty until first use
    self._str, self._idx, self._json = "", "", ""
    # We should cache the attributes parsed
    self.cache_attributes = environment.settings().cache.attributes
    # some of the attributes are MP_REACH_NLRI or MP_UNREACH_NLRI
    self.hasmp = 0
    # The parsed attributes have no mp routes and/or those are last
    self.cacheable = True
    # for the last route, the part of the attributes which are not routes we can use for fast caching
    self.prefix = ""
def __init__(self, configurations):
    """Build the reactor state, helpers and listeners.

    BUG FIX: the attribute was spelled 'self. async' -- 'async' became a
    reserved keyword in Python 3.7 (SyntaxError), and run() in this class
    accesses the object as 'self.asynchronous', so the old spelling could
    never have worked there.
    """
    self._ips = environment.settings().tcp.bind
    self._port = environment.settings().tcp.port
    self._stopping = environment.settings().tcp.once
    self.max_loop_time = environment.settings().reactor.speed
    self.early_drop = environment.settings().daemon.drop

    self.processes = None
    self.configuration = Configuration(configurations)
    self.logger = Logger()
    # renamed from 'async' (keyword since Python 3.7); run() reads
    # self.asynchronous.schedule()/run()
    self.asynchronous = ASYNC()
    self.signal = Signal()
    self.daemon = Daemon(self)
    self.listener = Listener(self)
    self.api = API(self)
    # NOTE(review): run() iterates self._peers while this sets self.peers --
    # confirm which spelling the rest of the class uses
    self.peers = {}

    self._reload_processes = False
    self._saved_pid = False
def _read_open (self):
    """Generator: wait for the peer's OPEN message, then yield it.

    Yields ACTION.LATER while no (complete) OPEN has arrived, and finally
    yields the OPEN message itself.  An over-long wait is bounded by a
    ReceiveTimer built from the bgp.openwait setting.
    """
    wait = environment.settings().bgp.openwait
    opentimer = ReceiveTimer(self.proto.connection.session,wait,1,1,'waited for open too long, we do not like stuck in active')
    # Only yield if we have not the open, otherwise the reactor can run the other connection
    # which would be bad as we need to do the collision check without going to the other peer
    for message in self.proto.read_open(self.neighbor.peer_address.top()):
        opentimer.check_ka(message)
        # XXX: FIXME: change the whole code to use the ord and not the chr version
        if ordinal(message.TYPE) == Message.CODE.NOP:
            # BUG FIX: yielding ACTION.NOW here made ExaBGP busy-spin reading
            # from a peer which had not (yet) replied to the OPEN message or
            # had not sent enough bytes.  See GH #723.
            yield ACTION.LATER
    yield message
def __init__(self):
    """Prepare empty caches for this object's rendered representations."""
    # string / index / json caches, filled lazily
    for name in ('_str', '_idx', '_json'):
        setattr(self, name, '')
    # should parsed attributes be cached at all ?
    self.cache_attributes = environment.settings().cache.attributes
    # non-zero when MP_REACH_NLRI or MP_UNREACH_NLRI attributes are present
    self.hasmp = 0
    # The parsed attributes have no mp routes and/or those are last
    self.cacheable = True
    # for the last route, the part of the attributes which are not routes we can use for fast caching
    self.prefix = ''
def __init__ (self):
    """Read the logging configuration and attach the destination handler.

    Facility flags (_reactor, _daemon, ...) are pre-computed so per-message
    checks are simple attribute tests.  The destination can be syslog
    (default), 'host:<name>', 'stdout', 'stderr' or a rotating file path.
    """
    command = environment.settings()
    self.short = command.log.short
    self.level = command.log.level
    self._pdb = command.debug.pdb

    # a facility is active when logging is enabled globally or per facility
    self._reactor = command.log.enable and (command.log.all or command.log.reactor)
    self._daemon = command.log.enable and (command.log.all or command.log.daemon)
    self._processes = command.log.enable and (command.log.all or command.log.processes)
    self._configuration = command.log.enable and (command.log.all or command.log.configuration)
    self._network = command.log.enable and (command.log.all or command.log.network)
    self._wire = command.log.enable and (command.log.all or command.log.packets)
    self._message = command.log.enable and (command.log.all or command.log.message)
    self._rib = command.log.enable and (command.log.all or command.log.rib)
    self._timer = command.log.enable and (command.log.all or command.log.timers)
    self._routes = command.log.enable and (command.log.all or command.log.routes)
    self._parser = command.log.enable and (command.log.all or command.log.parser)

    if not command.log.enable:
        return

    destination = command.log.destination

    try:
        if destination in ('','syslog'):
            if sys.platform == "darwin":
                address = '/var/run/syslog'
            else:
                address = '/dev/log'
            if not os.path.exists(address):
                address = ('localhost', 514)
            handler = logging.handlers.SysLogHandler(address)
        elif destination.lower().startswith('host:'):
            # If the address is invalid, each syslog call will print an error.
            # See how it can be avoided, as the socket error is encapsulated and not returned
            address = (destination[5:].strip(), 514)
            handler = logging.handlers.SysLogHandler(address)
        else:
            if destination.lower() == 'stdout':
                handler = logging.StreamHandler(sys.stdout)
            elif destination.lower() == 'stderr':
                handler = logging.StreamHandler(sys.stderr)
            else:
                handler = logging.handlers.RotatingFileHandler(destination, maxBytes=5*1024*1024, backupCount=5)
        self._syslog = logging.getLogger()
        self._syslog.setLevel(logging.DEBUG)
        self._syslog.addHandler(handler)
    except IOError:
        # BUG FIX: the message promised a fallback to stdout but no handler
        # was ever installed, leaving the root logger without a destination
        self._syslog = logging.getLogger()
        self._syslog.setLevel(logging.DEBUG)
        self._syslog.addHandler(logging.StreamHandler(sys.stdout))
        self.critical('Can not use SYSLOG, failing back to stdout')
def __init__(self):
    """Configure the logging facilities and install the destination handler."""
    command = environment.settings()
    self.short = command.log.short
    self.level = command.log.level
    self._pdb = command.debug.pdb

    enabled = command.log.enable

    def active (flag):
        # a facility logs only when logging is on globally or for it
        return enabled and (command.log.all or flag)

    self._reactor = active(command.log.reactor)
    self._daemon = active(command.log.daemon)
    self._processes = active(command.log.processes)
    self._configuration = active(command.log.configuration)
    self._network = active(command.log.network)
    self._wire = active(command.log.packets)
    self._message = active(command.log.message)
    self._rib = active(command.log.rib)
    self._timer = active(command.log.timers)
    self._routes = active(command.log.routes)
    self._parser = active(command.log.parser)

    if not enabled:
        return

    destination = command.log.destination
    lowered = destination.lower()

    try:
        if destination in ("", "syslog"):
            address = "/var/run/syslog" if sys.platform == "darwin" else "/dev/log"
            if not os.path.exists(address):
                address = ("localhost", 514)
            handler = logging.handlers.SysLogHandler(address)
        elif lowered.startswith("host:"):
            # If the address is invalid, each syslog call will print an error.
            # See how it can be avoided, as the socket error is encapsulated and not returned
            handler = logging.handlers.SysLogHandler((destination[5:].strip(), 514))
        elif lowered == "stdout":
            handler = logging.StreamHandler(sys.stdout)
        elif lowered == "stderr":
            handler = logging.StreamHandler(sys.stderr)
        else:
            handler = logging.handlers.RotatingFileHandler(destination, maxBytes=5 * 1024 * 1024, backupCount=5)

        self._syslog = logging.getLogger()
        self._syslog.setLevel(logging.DEBUG)
        self._syslog.addHandler(handler)
    except IOError:
        self.critical("Can not use SYSLOG, failing back to stdout")
def _read_open (self):
    """Generator yielding ACTION.LATER until the peer's OPEN arrives, then the OPEN itself."""
    max_wait = environment.settings().bgp.openwait
    open_timer = ReceiveTimer(self.proto.connection.session,max_wait,1,1,'waited for open too long, we do not like stuck in active')

    # only yield once the OPEN is complete : the reactor must not run the
    # other connection while the collision check is still pending
    for message in self.proto.read_open(self.neighbor.peer_address.top()):
        open_timer.check_ka(message)
        # XXX: FIXME: change the whole code to use the ord and not the chr version
        if ordinal(message.TYPE) == Message.CODE.NOP:
            # a silent peer (or a partial read) must not make us busy-spin
            # on the socket : wait our turn instead.  See GH #723 .
            yield ACTION.LATER

    yield message
def daemonise (self):
    """Detach the process from the terminal unless logging goes to stdout/stderr.

    Does nothing when daemonize is off, and refuses to fork when logs are
    written to stdout or stderr (they would be lost after detaching).
    """
    if not self.daemonize:
        return

    log = environment.settings().log
    if log.enable and log.destination.lower() in ('stdout','stderr'):
        self.logger.daemon('ExaBGP can not fork when logs are going to %s' % log.destination.lower(),'critical')
        return

    def fork_exit ():
        # fork and let the parent exit so the child runs detached
        try:
            pid = os.fork()
            if pid > 0:
                os._exit(0)
        # BUG FIX: 'except OSError, e' is Python-2-only syntax; the 'as'
        # form is valid on Python 2.6+ and Python 3
        except OSError as e:
            self.logger.reactor('Can not fork, errno %d : %s' % (e.errno,e.strerror),'critical')
    # NOTE(review): fork_exit is defined but not called in this excerpt --
    # the remainder of daemonise presumably lives outside this view
def __init__ (self, afi, peer, local):
    """Per-connection state; peer and local are the IP addresses as strings."""
    try:
        self.defensive = environment.settings().debug.defensive
        self.logger = Logger()
    except RuntimeError:
        # no environment available : be defensive and use a stub logger
        self.defensive = True
        self.logger = FakeLogger()

    self.afi = afi
    self.peer = peer
    self.local = local

    self.io = None
    self.established = False

    # per-direction connection counter
    self.id = self.identifier.get(self.direction,1)
def callback ():
    """Generator streaming the requested RIB entries to the API client.

    Uses free variables closed over from the enclosing command handler:
    last, reactor, rib_name, advertised, route_type, extensive, service.
    Yields True after each chunk so the reactor can interleave other work.
    """
    # how many route lines to emit before yielding back to the reactor
    lines_per_yield = environment.settings().api.chunk

    if last in ('routes', 'extensive', 'static', 'flow', 'l2vpn'):
        # generic commands apply to every known peer
        peers = reactor.peers()
    else:
        # otherwise restrict to the neighbor named in the command
        peers = [n for n in reactor.peers() if 'neighbor %s' % last in n]

    for key in peers:
        # NOTE(review): 'neighor_rib' looks like a typo for 'neighbor_rib' --
        # confirm against the reactor API before renaming
        routes = reactor.neighor_rib(key, rib_name, advertised)
        while routes:
            # process the routes one chunk at a time
            changes, routes = routes[:lines_per_yield], routes[lines_per_yield:]
            for change in changes:
                if isinstance(change.nlri, route_type):
                    if extensive:
                        reactor.processes.write(service,'%s %s %s' % (reactor.neighbor_name(key),'%s %s' % change.nlri.family(),change.extensive()))
                    else:
                        reactor.processes.write(service,'neighbor %s %s %s' % (reactor.neighbor_ip(key),'%s %s' % change.nlri.family(),str(change.nlri)))
            # hand control back to the reactor after each chunk
            yield True

    reactor.processes.answer_done(service)
def __init__ (self, peer):
    """Wire the protocol object to its peer and pick the TCP port to use."""
    try:
        self.logger = Logger()
    except RuntimeError:
        self.logger = FakeLogger()

    self.peer = peer
    self.neighbor = peer.neighbor
    self.negotiated = Negotiated(self.neighbor)
    self.connection = None

    # port selection : explicit neighbor setting wins, then either of the
    # two environment variable spellings, finally the default BGP port
    dotted = os.environ.get('exabgp.tcp.port','')
    underscored = os.environ.get('exabgp_tcp_port','')
    if self.neighbor.connect:
        self.port = self.neighbor.connect
    elif dotted.isdigit():
        self.port = int(dotted)
    elif underscored.isdigit():
        self.port = int(underscored)
    else:
        self.port = 179

    from exabgp.configuration.environment import environment
    self.log_routes = peer.neighbor.adj_rib_in or environment.settings().log.routes
def __init__(self, afi, peer, local):
    """Per-connection state; peer and local are the IP addresses as strings."""
    # start with the base message size; may grow for extended messages
    self.msg_size = ExtendedMessage.INITIAL_SIZE

    try:
        self.defensive = environment.settings().debug.defensive
        self.logger = Logger()
    except RuntimeError:
        # no environment available : be defensive and use a stub logger
        self.defensive = True
        self.logger = FakeLogger()

    self.afi = afi
    self.peer = peer
    self.local = local

    self.io = None
    self.established = False

    # poller registries for reads and writes
    self._rpoller = {}
    self._wpoller = {}

    # per-direction connection counter
    self.id = self.identifier.get(self.direction, 1)
def _connect (self):
    """Generator driving the outgoing (active) side of a BGP session.

    Establishes the TCP connection, sends our OPEN, reads the peer's OPEN,
    resolves a possible connection collision, exchanges KEEPALIVEs and moves
    the 'out' direction to ESTABLISHED.  Yields ACTION values so the reactor
    can schedule this generator.  Raises Interrupted when the connection
    fails or loses the collision check.
    """
    # try to establish the outgoing connection
    proto = Protocol(self)
    generator = proto.connect()

    connected = False
    try:
        while not connected:
            if self._teardown:
                raise StopIteration()
            # BUG FIX: generator.next() is Python-2-only; the next() builtin
            # works on Python 2.6+ and Python 3
            connected = next(generator)
            # we want to come back as soon as possible
            yield ACTION.LATER
    except StopIteration:
        # Connection failed
        if not connected:
            proto.close('connection to peer failed',self._['in']['state'] != STATE.ESTABLISHED)

        # A connection arrived before we could establish !
        if not connected or self._['in']['proto']:
            stop = Interrupted()
            stop.direction = 'out'
            yield ACTION.NOW
            raise stop

    self._['out']['state'] = STATE.CONNECT
    self._['out']['proto'] = proto

    # send OPEN
    # Only yield if we have not the open, otherwise the reactor can run the other connection
    # which would be bad as we need to set the state without going to the other peer
    message = Message.CODE.NOP
    for message in proto.new_open(self._restarted):
        if ord(message.TYPE) == Message.CODE.NOP:
            yield ACTION.NOW

    proto.negotiated.sent(message)
    self._['out']['state'] = STATE.OPENSENT

    # Read OPEN
    wait = environment.settings().bgp.openwait
    opentimer = ReceiveTimer(self.me,wait,1,1,'waited for open too long, we do not like stuck in active')

    for message in self._['out']['proto'].read_open(self.neighbor.peer_address.ip):
        opentimer.check_ka(message)
        # XXX: FIXME: change the whole code to use the ord and not the chr version
        # Only yield if we have not the open, otherwise the reactor can run the other
        # connection, which would be bad as we need to do the collision check
        if ord(message.TYPE) == Message.CODE.NOP:
            yield ACTION.LATER

    self._['out']['state'] = STATE.OPENCONFIRM
    proto.negotiated.received(message)
    proto.validate_open()

    # collision with the incoming connection : router-id ordering decides
    if self._['in']['state'] == STATE.OPENCONFIRM:
        self.logger.network('outgoing connection finds the incoming connection is in openconfirm')
        local_id = self.neighbor.router_id.packed
        remote_id = proto.negotiated.received_open.router_id.packed

        if local_id < remote_id:
            self.logger.network('aborting the outgoing connection')
            stop = Interrupted()
            stop.direction = 'out'
            raise stop
        else:
            self.logger.network('closing the incoming connection')
            self._stop('in','collision local id < remote id')
            yield ACTION.LATER

    # Send KEEPALIVE
    for message in proto.new_keepalive('OPENCONFIRM'):
        yield ACTION.NOW

    # Start the keepalive timer
    self.recv_timer = ReceiveTimer(self.me,proto.negotiated.holdtime,4,0)

    # Read KEEPALIVE
    for message in self._['out']['proto'].read_keepalive():
        self.recv_timer.check_ka(message)
        yield ACTION.NOW

    self._['out']['state'] = STATE.ESTABLISHED
    # let the caller know that we were successful
    yield ACTION.NOW
def __init__(self, version):
    """Remember the API version and output preferences."""
    self.version = version
    # whether the JSON output should be compact
    self.compact = environment.settings().api.compact
    # timestamp provider, the no-op one by default
    self.time = nop
def run(self, validate, root):
    """Main reactor loop: bind listeners, load configuration, drop
    privileges, then multiplex peers, API processes and signals until
    shutdown.  Returns an Exit code.

    validate: when true, only parse and print the neighbors, then exit.
    root: passed by the caller; not read in this excerpt.
    """
    self.daemon.daemonise()

    # Make sure we create processes once we have closed file descriptor
    # unfortunately, this must be done before reading the configuration file
    # so we can not do it with dropped privileges
    self.processes = Processes()

    # we have to read the configuration possibly with root privileges
    # as we need the MD5 information when we bind, and root is needed
    # to bind to a port < 1024
    # this is undesirable as :
    # - handling user generated data as root should be avoided
    # - we may not be able to reload the configuration once the privileges are dropped
    # but I can not see any way to avoid it
    for ip in self._ips:
        if not self.listener.listen_on(ip, None, self._port, None, False, None):
            return self.Exit.listening

    if not self.load():
        return self.Exit.configuration

    if validate:  # only validate configuration
        self.logger.warning('', 'configuration')
        self.logger.warning('parsed Neighbors, un-templated', 'configuration')
        self.logger.warning('------------------------------', 'configuration')
        self.logger.warning('', 'configuration')
        for key in self._peers:
            self.logger.warning(str(self._peers[key].neighbor), 'configuration')
            self.logger.warning('', 'configuration')
        return self.Exit.validate

    # per-neighbor listeners (MD5, TTL) configured in the neighbor blocks
    for neighbor in self.configuration.neighbors.values():
        if neighbor.listen:
            if not self.listener.listen_on(
                    neighbor.md5_ip, neighbor.peer_address, neighbor.listen,
                    neighbor.md5_password, neighbor.md5_base64, neighbor.ttl_in):
                return self.Exit.listening

    if not self.early_drop:
        self.processes.start(self.configuration.processes)

    if not self.daemon.drop_privileges():
        self.logger.critical(
            'could not drop privileges to \'%s\' refusing to run as root' % self.daemon.user,
            'reactor')
        self.logger.critical(
            'set the environmemnt value exabgp.daemon.user to change the unprivileged user',
            'reactor')
        return self.Exit.privileges

    if self.early_drop:
        self.processes.start(self.configuration.processes)

    # This is required to make sure we can write in the log location as
    # we now have dropped root privileges
    if not self.logger.restart():
        self.logger.critical('could not setup the logger, aborting', 'reactor')
        return self.Exit.log

    if not self.daemon.savepid():
        return self.Exit.pid

    # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
    reload_completed = False

    # optional startup delay, aligned on a multiple of 'wait' minutes
    wait = environment.settings().tcp.delay
    if wait:
        sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
        self.logger.debug(
            'waiting for %d seconds before connecting' % sleeptime, 'reactor')
        time.sleep(float(sleeptime))

    workers = {}       # fd -> peer key, for peers parked waiting on I/O
    peers = set()      # keys of the peers to give a turn to this cycle
    api_fds = []       # fds of the API helper processes currently polled
    ms_sleep = int(self._sleep_time * 1000)

    while True:
        try:
            # 1. handle any pending signal first
            if self.signal.received:
                for key in self._peers:
                    if self._peers[key].neighbor.api['signal']:
                        self._peers[key].reactor.processes.signal(
                            self._peers[key].neighbor, self.signal.number)

                signaled = self.signal.received
                self.signal.rearm()

                if signaled == Signal.SHUTDOWN:
                    self.exit_code = self.Exit.normal
                    self.shutdown()
                    break

                if signaled == Signal.RESTART:
                    self.restart()
                    continue

                # do not reload while a previous reload is still propagating
                if not reload_completed:
                    continue

                if signaled == Signal.FULL_RELOAD:
                    self._reload_processes = True

                if signaled in (Signal.RELOAD, Signal.FULL_RELOAD):
                    self.load()
                    self.processes.start(self.configuration.processes, self._reload_processes)
                    self._reload_processes = False
                continue

            if self.listener.incoming():
                # check all incoming connection
                self.asynchronous.schedule(
                    str(uuid.uuid1()), 'checking for new connection(s)',
                    self.listener.new_connections())

            peers = self.active_peers()
            if self._completed(peers):
                reload_completed = True

            sleep = ms_sleep

            # do not attempt to listen on closed sockets even if the peer is still here
            for io in list(workers.keys()):
                if io == -1:
                    self._poller.unregister(io)
                    del workers[io]

            # give a turn to all the peers
            for key in list(peers):
                peer = self._peers[key]

                # limit the number of message handling per second
                if self._rate_limited(key, peer.neighbor.rate_limit):
                    peers.discard(key)
                    continue

                # handle the peer
                action = peer.run()

                # .run() returns an ACTION enum:
                # * immediate if it wants to be called again
                # * later if it should be called again but has no work atm
                # * close if it is finished and is closing down, or restarting
                if action == ACTION.CLOSE:
                    if key in self._peers:
                        del self._peers[key]
                    peers.discard(key)
                # we are losing this peer, no point scheduling more process work
                elif action == ACTION.LATER:
                    io = peer.socket()
                    if io != -1:
                        self._poller.register(
                            io,
                            select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLNVAL | select.POLLERR)
                        workers[io] = key
                    # no need to come back to it before a full cycle
                    peers.discard(key)
                elif action == ACTION.NOW:
                    sleep = 0

                if not peers:
                    break

            # read at least one message per process if there is some and parse it
            for service, command in self.processes.received():
                self.api.text(self, service, command)
                sleep = 0

            self.asynchronous.run()

            # keep the poller in sync with the API helper processes' fds
            if api_fds != self.processes.fds:
                for fd in api_fds:
                    if fd == -1:
                        continue
                    if fd not in self.processes.fds:
                        self._poller.unregister(fd)
                for fd in self.processes.fds:
                    if fd == -1:
                        continue
                    if fd not in api_fds:
                        self._poller.register(
                            fd,
                            select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLNVAL | select.POLLERR)
                api_fds = self.processes.fds

            # wake parked peers whose socket became ready
            for io in self._wait_for_io(sleep):
                if io not in api_fds:
                    peers.add(workers[io])

            if self._stopping and not self._peers.keys():
                self._termination('exiting on peer termination', self.Exit.normal)

        except KeyboardInterrupt:
            self._termination('^C received', self.Exit.normal)
        except SystemExit:
            self._termination('exiting', self.Exit.normal)
        # socket.error is a subclass of IOError (so catch it first)
        except socket.error:
            self._termination('socket error received', self.Exit.socket)
        except IOError:
            self._termination(
                'I/O Error received, most likely ^C during IO', self.Exit.io_error)
        except ProcessError:
            self._termination(
                'Problem when sending message(s) to helper program, stopping',
                self.Exit.process)
        except select.error:
            self._termination('problem using select, stopping', self.Exit.select)

    return self.exit_code