def pack(self):
    """Encode the capabilities as a BGP OPEN optional-parameter block.

    Each capability is a (code, length, value) TLV wrapped in an optional
    parameter of type 2 (Capability); the whole set is prefixed with its
    one-byte total length.
    """
    rs = []
    # self maps capability code -> capability container
    # .items() instead of the py2-only .iteritems()
    for code, capabilities in self.items():
        for capability in capabilities.extract():
            rs.append(concat_strs(chr_(code), chr_(len(capability)), capability))
    # optional parameter type 2 == Capability
    parameters = b''.join([concat_strs(chr_(2), chr_(len(r)), r) for r in rs])
    return concat_strs(chr_(len(parameters)), parameters)
def __init__(self, packed=None, integer=None, ip=None):
    """Build a BGP add-path path-identifier.

    packed  -- raw 4-byte wire value
    integer -- numeric path-id, encoded big-endian over 4 bytes
    ip      -- dotted-quad string, each octet becoming one byte
    With no argument the path-info is empty (no path-id).
    """
    if packed:
        self.path_info = packed
    elif ip:
        self.path_info = b''.join([chr_(int(_)) for _ in ip.split('.')])
    elif integer is not None:
        # 'is not None' so a path-id of 0 still encodes as four zero bytes
        # (a bare 'elif integer:' silently dropped it)
        self.path_info = b''.join([chr_((integer >> offset) & 0xff) for offset in [24, 16, 8, 0]])
    else:
        self.path_info = b''
def pack(self, negotiated=None):
    """Wire-encode the attribute: flags, type code, length, then payload."""
    flags = self.FLAG
    payload = self.data
    if len(payload) > 0xFF:
        # payload too long for a one-byte length: switch to two bytes
        flags |= Attribute.Flag.EXTENDED_LENGTH
    if flags & Attribute.Flag.EXTENDED_LENGTH:
        encoded_length = pack('!H', len(payload))
    else:
        encoded_length = chr_(len(payload))
    return concat_strs(chr_(flags), chr_(self.ID), encoded_length, payload)
def pack(self, negotiated=None):
    """Wire-encode the attribute: flags, type code, length, then payload."""
    flags = self.FLAG
    if len(self.data) > 0xFF:
        # over one byte of payload: the length must be expressed on two bytes
        flags |= Attribute.Flag.EXTENDED_LENGTH
    extended = bool(flags & Attribute.Flag.EXTENDED_LENGTH)
    encoded_length = pack('!H', len(self.data)) if extended else chr_(len(self.data))
    return concat_strs(chr_(flags), chr_(self.ID), encoded_length, self.data)
def __init__(self, packed=None, integer=None, ip=None):
    """Build a BGP add-path path-identifier.

    packed  -- raw 4-byte wire value
    integer -- numeric path-id, encoded big-endian over 4 bytes
    ip      -- dotted-quad string, each octet becoming one byte
    With no argument the path-info is empty (no path-id).
    """
    if packed:
        self.path_info = packed
    elif ip:
        self.path_info = b''.join([chr_(int(_)) for _ in ip.split('.')])
    elif integer is not None:
        # 'is not None' so a path-id of 0 still encodes as four zero bytes
        # (a bare 'elif integer:' silently dropped it)
        self.path_info = b''.join([
            chr_((integer >> offset) & 0xff) for offset in [24, 16, 8, 0]
        ])
    else:
        self.path_info = b''
def _attribute(self, value):
    """Wrap *value* as a full path attribute (flags, type, length, payload).

    An OPTIONAL attribute with an empty payload is omitted entirely.
    """
    flags = self.FLAG
    if flags & Attribute.Flag.OPTIONAL and not value:
        return b''
    size = len(value)
    if size > 0xFF:
        flags |= Attribute.Flag.EXTENDED_LENGTH
    encoded_length = pack('!H', size) if flags & Attribute.Flag.EXTENDED_LENGTH else chr_(size)
    return concat_strs(chr_(flags), chr_(self.ID), encoded_length, value)
def _attribute(self, value):
    """Wrap *value* as a full path attribute (flags, type, length, payload).

    An OPTIONAL attribute with no payload is dropped (returns b'').
    """
    flags = self.FLAG
    if flags & Attribute.Flag.OPTIONAL and not value:
        return b''
    if len(value) > 0xFF:
        flags |= Attribute.Flag.EXTENDED_LENGTH
    if flags & Attribute.Flag.EXTENDED_LENGTH:
        length_bytes = pack('!H', len(value))
    else:
        length_bytes = chr_(len(value))
    return concat_strs(chr_(flags), chr_(self.ID), length_bytes, value)
class ESI(object):
    """10-byte EVPN Ethernet Segment Identifier (RFC 7432).

    Stored as raw bytes; compares by value, refuses ordering comparisons.
    """

    DEFAULT = b''.join(chr_(0) for _ in range(0, 10))  # all-zero: no ESI
    MAX = b''.join(chr_(0xFF) for _ in range(0, 10))

    __slots__ = ['esi']

    def __init__(self, esi=None):
        # esi: 10 raw bytes, or None for the all-zero default
        self.esi = self.DEFAULT if esi is None else esi
        if len(self.esi) != 10:
            # report the stored value, not the raw argument
            raise Exception("incorrect ESI, len %d instead of 10" % len(self.esi))

    def __eq__(self, other):
        return self.esi == other.esi

    def __ne__(self, other):
        # must be __ne__ (was misspelled __neq__, which Python never calls;
        # on py2 '!=' then fell back to identity comparison)
        return self.esi != other.esi

    def __lt__(self, other):
        raise RuntimeError('comparing ESI for ordering does not make sense')

    def __le__(self, other):
        raise RuntimeError('comparing ESI for ordering does not make sense')

    def __gt__(self, other):
        raise RuntimeError('comparing ESI for ordering does not make sense')

    def __ge__(self, other):
        raise RuntimeError('comparing ESI for ordering does not make sense')

    def __str__(self):
        if self.esi == self.DEFAULT:
            return "-"
        return ":".join('%02x' % ord_(_) for _ in self.esi)

    def __repr__(self):
        return self.__str__()

    def pack(self):
        """Return the 10-byte wire form."""
        return self.esi

    def __len__(self):
        return 10

    def __hash__(self):
        return hash(self.esi)

    @classmethod
    def unpack(cls, data):
        """Build an ESI from the first 10 bytes of *data*."""
        return cls(data[:10])

    def json(self, compact=None):
        return '"esi": "%s"' % str(self)
def pack(self, negotiated=None):
    """Encode the NLRI: optional path-id, mask byte, labels, RD, prefix."""
    if negotiated and negotiated.addpath.send(self.afi, self.safi):
        addpath = self.path_info.pack()
    else:
        addpath = b''
    # the advertised mask covers the label and RD bytes as well as the prefix
    prefix_bits = len(self.labels) * 8 + len(self.rd) * 8 + self.cidr.mask
    return addpath + chr_(prefix_bits) + self.labels.pack() + self.rd.pack() + self.cidr.pack_ip()
def test101_EVPNHashEqual_somefieldsvary(self):
    '''
    Two EVPN MAC NLRIs differing by their ESI or label or RD, or nexthop,
    but otherwise identical should hash to the same value, and be equal
    '''
    # reference route
    nlri0 = EVPNMAC(RouteDistinguisher.fromElements("42.42.42.42", 5), ESI(),
                    EthernetTag(111), MAC("01:02:03:04:05:06"), 6 * 8,
                    Labels([42], True), IP.create("1.1.1.1"))
    # ESI
    nlri1 = EVPNMAC(RouteDistinguisher.fromElements("42.42.42.42", 5),
                    ESI(b''.join(chr_(1) for _ in range(0, 10))),
                    EthernetTag(111), MAC("01:02:03:04:05:06"), 6 * 8,
                    Labels([42], True), IP.create("1.1.1.1"))
    # label
    nlri2 = EVPNMAC(RouteDistinguisher.fromElements("42.42.42.42", 5), ESI(),
                    EthernetTag(111), MAC("01:02:03:04:05:06"), 6 * 8,
                    Labels([4444], True), IP.create("1.1.1.1"))
    # IP: different IPs, but same MACs: different route
    nlri3 = EVPNMAC(RouteDistinguisher.fromElements("42.42.42.42", 5), ESI(),
                    EthernetTag(111), MAC("01:02:03:04:05:06"), 6 * 8,
                    Labels([42], True), IP.create("2.2.2.2"))
    # with a next hop...
    nlri4 = EVPNMAC(RouteDistinguisher.fromElements("42.42.42.42", 5), ESI(),
                    EthernetTag(111), MAC("01:02:03:04:05:06"), 6 * 8,
                    Labels([42], True), IP.create("1.1.1.1"),
                    IP.pton("10.10.10.10"))
    nlri5 = EVPNMAC(RouteDistinguisher.fromElements("42.42.42.42", 5), ESI(),
                    EthernetTag(111), MAC("01:02:03:04:05:06"), 6 * 8,
                    Labels([42], True), IP.create("1.1.1.1"),
                    IP.pton("11.11.11.11"))

    # the test expects ESI, label and nexthop to be excluded from identity
    self.assertEqual(hash(nlri0), hash(nlri1))
    self.assertEqual(hash(nlri0), hash(nlri2))
    self.assertEqual(hash(nlri0), hash(nlri4))
    self.assertEqual(nlri0, nlri1)
    self.assertEqual(nlri0, nlri2)
    self.assertEqual(nlri0, nlri4)
    self.assertEqual(nlri1, nlri2)
    self.assertEqual(nlri1, nlri4)
    self.assertEqual(nlri2, nlri4)
    self.assertEqual(nlri4, nlri5)
    # but a different IP makes a different route
    self.assertNotEqual(hash(nlri0), hash(nlri3))
    self.assertNotEqual(nlri0, nlri3)
    self.assertNotEqual(nlri1, nlri3)
    self.assertNotEqual(nlri2, nlri3)
    self.assertNotEqual(nlri3, nlri4)
def _pack(self, packed=None):
    """Return (and memoise) the wire form of this EVPN MAC route."""
    if self._packed:
        return self._packed
    if packed:
        # accept a pre-built encoding
        self._packed = packed
        return packed
    ip_bits = len(self.ip) * 8 if self.ip else 0
    self._packed = concat_strs(
        self.rd.pack(),
        self.esi.pack(),
        self.etag.pack(),
        chr_(self.maclen),  # only 48 supported by the draft
        self.mac.pack(),
        chr_(ip_bits),
        self.ip.pack() if self.ip else b'',
        self.label.pack(),
    )
    return self._packed
def unpack(cls, data):
    """Decode a stack of 3-byte MPLS labels, stopping at the bottom-of-stack bit."""
    labels = []
    while data:
        # left-pad to four bytes so struct can read a network-order long
        value = unpack('!L', chr_(0) + data[:3])[0]
        data = data[3:]
        labels.append(value >> 4)  # drop the EXP/stack bits
        if value & 0x001:
            break  # bottom of stack reached
    return cls(labels)
def _pack(self, packed=None):
    """Return (and memoise) the wire form of this EVPN MAC route."""
    if self._packed:
        return self._packed
    if packed:
        # trust a caller-supplied encoding
        self._packed = packed
        return packed
    encoded = concat_strs(
        self.rd.pack(),
        self.esi.pack(),
        self.etag.pack(),
        chr_(self.maclen),  # only 48 supported by the draft
        self.mac.pack(),
        chr_(len(self.ip) * 8 if self.ip else 0),
        self.ip.pack() if self.ip else b'',
        self.label.pack(),
    )
    self._packed = encoded
    return encoded
def unpack_nlri(cls, afi, safi, bgp, action, addpath):
    """Decode one (possibly labelled, possibly RD-prefixed) NLRI from *bgp*.

    Returns (nlri, remaining_bytes); raises Notify(3, 10) on malformed input.
    """
    nlri = cls(afi, safi, action)

    if addpath:
        nlri.path_info = PathInfo(bgp[:4])
        bgp = bgp[4:]

    mask = ord_(bgp[0])
    bgp = bgp[1:]

    if cls.has_label():
        labels = []
        while bgp and mask >= 8:
            label = int(unpack('!L', chr_(0) + bgp[:3])[0])
            bgp = bgp[3:]
            mask -= 24  # 3 bytes
            # The last 4 bits are the bottom of Stack
            # The last bit is set for the last label
            labels.append(label >> 4)
            # This is a route withdrawal
            if label == 0x800000 and action == IN.WITHDRAWN:
                break
            # This is a next-hop
            if label == 0x000000:
                break
            if label & 1:
                break
        nlri.labels = Labels(labels)

    if cls.has_rd():
        mask -= 8 * 8  # the 8 bytes of the route distinguisher
        rd = bgp[:8]
        bgp = bgp[8:]
        nlri.rd = RouteDistinguisher(rd)

    if mask < 0:
        raise Notify(3, 10, 'invalid length in NLRI prefix')

    if not bgp and mask:
        raise Notify(3, 10, 'not enough data for the mask provided to decode the NLRI')

    size = CIDR.size(mask)

    if len(bgp) < size:
        # error-string typo fixed ('sand' -> 'and'), matching the other copy
        raise Notify(3, 10, 'could not decode route with AFI %d and SAFI %d' % (afi, safi))

    network, bgp = bgp[:size], bgp[size:]
    nlri.cidr = CIDR(network + padding(IP.length(afi) - size), mask)

    return nlri, bgp
def _pack(self, packed=None):
    """Return (and memoise) the wire form: RD, ESI, IP bit-length, IP."""
    if self._packed:
        return self._packed
    if packed:
        self._packed = packed
        return packed
    ip_bits = len(self.ip) * 8 if self.ip else 0
    self._packed = concat_strs(
        self.rd.pack(),
        self.esi.pack(),
        chr_(ip_bits),
        self.ip.pack() if self.ip else b'',
    )
    return self._packed
def packed_attributes(self, negotiated, maximum=Negotiated.FREE_SIZE):
    """Yield packed MP_REACH_NLRI attributes for our NLRIs.

    NLRIs are grouped per next-hop; a new attribute is started whenever
    adding one more NLRI would push the payload past *maximum* bytes.
    """
    if not self.nlris:
        return

    # addpath = negotiated.addpath.send(self.afi,self.safi)
    # nexthopself = negotiated.nexthopself(self.afi)
    mpnlri = {}
    for nlri in self.nlris:
        if nlri.family() != self.family():  # nlri is not part of specified family
            continue
        if nlri.nexthop is NoNextHop:
            # EOR and Flow may not have any next_hop
            nexthop = b''
        else:
            # we do not want a next_hop attribute packed (with the _attribute()) but just the next_hop itself
            if nlri.safi.has_rd():
                # .packed and not .pack()
                nexthop = chr_(0) * 8 + nlri.nexthop.ton(negotiated, nlri.afi)
            else:
                # .packed and not .pack()
                nexthop = nlri.nexthop.ton(negotiated, nlri.afi)
        mpnlri.setdefault(nexthop, []).append(nlri.pack(negotiated))

    # .items() instead of the py2-only .iteritems()
    for nexthop, nlris in mpnlri.items():
        # chr_() (not the bare chr()) so the join stays bytes on py3
        payload = b''.join([self.afi.pack(), self.safi.pack(), chr_(len(nexthop)), nexthop, chr_(0)])
        header_length = len(payload)
        for nlri in nlris:
            if self._len(payload + nlri) > maximum:
                if len(payload) == header_length or len(payload) > maximum:
                    raise Notify(6, 0, 'attributes size is so large we can not even pack on MPRNLRI')
                yield self._attribute(payload)
                payload = b''.join([self.afi.pack(), self.safi.pack(), chr_(len(nexthop)), nexthop, chr_(0), nlri])
                continue
            payload = b''.join([payload, nlri])
        if len(payload) == header_length or len(payload) > maximum:
            raise Notify(6, 0, 'attributes size is so large we can not even pack on MPRNLRI')
        yield self._attribute(payload)
def unpack_nlri(cls, afi, safi, bgp, action, addpath):
    """Decode one (possibly labelled, possibly RD-prefixed) NLRI from *bgp*.

    Returns (nlri, remaining_bytes); raises Notify(3, 10) on malformed input.
    """
    nlri = cls(afi, safi, action)

    if addpath:
        # 4-byte add-path identifier precedes the NLRI when negotiated
        nlri.path_info = PathInfo(bgp[:4])
        bgp = bgp[4:]

    # first byte: prefix length in bits (includes label and RD bytes)
    mask = ord_(bgp[0])
    bgp = bgp[1:]

    if cls.has_label():
        labels = []
        while bgp and mask >= 8:
            label = int(unpack('!L', chr_(0) + bgp[:3])[0])
            bgp = bgp[3:]
            mask -= 24  # 3 bytes
            # The last 4 bits are the bottom of Stack
            # The last bit is set for the last label
            labels.append(label >> 4)
            # This is a route withdrawal
            if label == 0x800000 and action == IN.WITHDRAWN:
                break
            # This is a next-hop
            if label == 0x000000:
                break
            if label & 1:
                break
        nlri.labels = Labels(labels)

    if cls.has_rd():
        mask -= 8 * 8  # the 8 bytes of the route distinguisher
        rd = bgp[:8]
        bgp = bgp[8:]
        nlri.rd = RouteDistinguisher(rd)

    if mask < 0:
        raise Notify(3, 10, 'invalid length in NLRI prefix')

    if not bgp and mask:
        raise Notify(3, 10, 'not enough data for the mask provided to decode the NLRI')

    size = CIDR.size(mask)

    if len(bgp) < size:
        raise Notify(3, 10, 'could not decode route with AFI %d and SAFI %d' % (afi, safi))

    network, bgp = bgp[:size], bgp[size:]
    # pad the network back to the full address length for this AFI
    nlri.cidr = CIDR(network + padding(IP.length(afi) - size), mask)

    return nlri, bgp
def fromElements(cls, prefix, suffix):
    """Build a RouteDistinguisher from its 'prefix:suffix' parts.

    A dotted prefix gives a type-1 RD (IPv4 administrator); otherwise
    type 0 (2-byte ASN) or type 2 (4-byte ASN) depending on value ranges.
    """
    try:
        if '.' in prefix:
            # type 1: IPv4 administrator, 2-byte assigned number
            parts = [chr_(0), chr_(1)]
            parts.extend([chr_(int(octet)) for octet in prefix.split('.')])
            parts.extend([chr_(suffix >> 8), chr_(suffix & 0xFF)])
            distinguisher = b''.join(parts)
        else:
            number = int(prefix)
            if number < pow(2, 16) and suffix < pow(2, 32):
                distinguisher = chr_(0) + chr_(0) + pack('!H', number) + pack('!L', suffix)
            elif number < pow(2, 32) and suffix < pow(2, 16):
                distinguisher = chr_(0) + chr_(2) + pack('!L', number) + pack('!H', suffix)
            else:
                raise ValueError('invalid route-distinguisher %s' % number)
        return cls(distinguisher)
    except ValueError:
        raise ValueError('invalid route-distinguisher %s:%s' % (prefix, suffix))
def pack(self, negotiated=None):
    """Encode the Flow Spec NLRI; components sorted by ID (an RFC requirement)."""
    ordered_rules = []
    # the order is a RFC requirement
    for ID, rules in sorted(self.rules.items()):
        # for each component get all the operation to do
        # the format use does not prevent two opposing rules meaning that no packet can ever match
        for rule in rules:
            rule.operations &= (CommonOperator.EOL ^ 0xFF)
        rules[-1].operations |= CommonOperator.EOL  # and add it to the last rule
        if ID not in (FlowDestination.ID, FlowSource.ID):
            ordered_rules.append(chr_(ID))
        ordered_rules.append(b''.join(rule.pack() for rule in rules))

    components = self.rd.pack() + b''.join(ordered_rules)
    size = len(components)
    if size < 0xF0:
        # short form: single length byte
        return concat_strs(chr_(size), components)
    if size < 0x0FFF:
        # long form: two bytes with the top nibble set
        return concat_strs(pack('!H', size | 0xF000), components)
    raise Notify(3, 0, "my administrator attempted to announce a Flow Spec rule larger than encoding allows, protecting the innocent the only way I can")
def _pack(self, packed=None):
    """Return (and memoise) the wire form: RD, ESI, IP bit-length, IP."""
    if self._packed:
        return self._packed
    if packed:
        self._packed = packed
        return packed
    encoded = concat_strs(
        self.rd.pack(),
        self.esi.pack(),
        chr_(len(self.ip) * 8 if self.ip else 0),
        self.ip.pack() if self.ip else b'',
    )
    self._packed = encoded
    return encoded
def _pack(self, packed=None):
    """Return (and memoise) the wire form: RD, ethernet tag, IP bit-length, IP."""
    if self._packed:
        return self._packed
    if packed:
        self._packed = packed
        return packed
    encoded = concat_strs(
        self.rd.pack(),
        self.etag.pack(),
        chr_(len(self.ip) * 8),
        self.ip.pack(),
    )
    self._packed = encoded
    return encoded
def fromElements(cls, prefix, suffix):
    """Build a RouteDistinguisher from its 'prefix:suffix' parts.

    Dotted prefix -> type 1 (IPv4 administrator); numeric prefix -> type 0
    (2-byte ASN : 4-byte number) or type 2 (4-byte ASN : 2-byte number).
    """
    try:
        if '.' in prefix:
            parts = [chr_(0), chr_(1)]
            parts.extend(chr_(int(octet)) for octet in prefix.split('.'))
            parts.append(chr_(suffix >> 8))
            parts.append(chr_(suffix & 0xFF))
            distinguisher = b''.join(parts)
        else:
            number = int(prefix)
            if number < pow(2, 16) and suffix < pow(2, 32):
                distinguisher = chr_(0) + chr_(0) + pack('!H', number) + pack('!L', suffix)
            elif number < pow(2, 32) and suffix < pow(2, 16):
                distinguisher = chr_(0) + chr_(2) + pack('!L', number) + pack('!H', suffix)
            else:
                raise ValueError('invalid route-distinguisher %s' % number)
        return cls(distinguisher)
    except ValueError:
        raise ValueError('invalid route-distinguisher %s:%s' % (prefix, suffix))
def route_distinguisher(tokeniser):
    """Parse a textual route-distinguisher token ('ASN:nn' or 'a.b.c.d:nn')."""
    data = tokeniser()

    separator = data.find(':')
    if separator > 0:
        prefix = data[:separator]
        suffix = int(data[separator+1:])
    # NOTE(review): with no ':' in the token, 'prefix' is unbound below and a
    # NameError is raised instead of a clean parse error - confirm intended

    if '.' in prefix:
        # type 1: IPv4 administrator, 2-byte assigned number
        data = [chr_(0), chr_(1)]
        data.extend([chr_(int(_)) for _ in prefix.split('.')])
        data.extend([chr_(suffix >> 8), chr_(suffix & 0xFF)])
        rtd = b''.join(data)
    else:
        number = int(prefix)
        if number < pow(2, 16) and suffix < pow(2, 32):
            # type 0: 2-byte ASN, 4-byte assigned number
            rtd = chr_(0) + chr_(0) + pack('!H', number) + pack('!L', suffix)
        elif number < pow(2, 32) and suffix < pow(2, 16):
            # type 2: 4-byte ASN, 2-byte assigned number
            rtd = chr_(0) + chr_(2) + pack('!L', number) + pack('!H', suffix)
        else:
            raise ValueError('invalid route-distinguisher %s' % data)
    return RouteDistinguisher(rtd)
def route_distinguisher(tokeniser):
    """Parse a textual route-distinguisher token ('ASN:nn' or 'a.b.c.d:nn')."""
    data = tokeniser()

    separator = data.find(':')
    if separator > 0:
        prefix = data[:separator]
        suffix = int(data[separator + 1:])
    # NOTE(review): with no ':' in the token, 'prefix' is unbound below and a
    # NameError is raised instead of a clean parse error - confirm intended

    if '.' in prefix:
        # type 1: IPv4 administrator, 2-byte assigned number
        data = [chr_(0), chr_(1)]
        data.extend([chr_(int(_)) for _ in prefix.split('.')])
        data.extend([chr_(suffix >> 8), chr_(suffix & 0xFF)])
        rtd = b''.join(data)
    else:
        number = int(prefix)
        if number < pow(2, 16) and suffix < pow(2, 32):
            # type 0: 2-byte ASN, 4-byte assigned number
            rtd = chr_(0) + chr_(0) + pack('!H', number) + pack('!L', suffix)
        elif number < pow(2, 32) and suffix < pow(2, 16):
            # type 2: 4-byte ASN, 2-byte assigned number
            rtd = chr_(0) + chr_(2) + pack('!L', number) + pack('!H', suffix)
        else:
            raise ValueError('invalid route-distinguisher %s' % data)
    return RouteDistinguisher(rtd)
def _pack(self, packed=None):
    """Return (and memoise) the wire form: RD, ESI, etag, IP length, IP, gateway, label."""
    if self._packed:
        return self._packed
    if packed:
        self._packed = packed
        return packed
    encoded = concat_strs(
        self.rd.pack(),
        self.esi.pack(),
        self.etag.pack(),
        chr_(self.iplen),
        self.ip.pack(),
        self.gwip.pack(),
        self.label.pack(),
    )
    self._packed = encoded
    return encoded
def _pack(self, packed=None):
    """Return (and memoise) the wire form: RD, ESI, etag, IP length, IP, gateway, label."""
    if self._packed:
        return self._packed
    if packed:
        # keep a caller-supplied encoding as-is
        self._packed = packed
        return packed
    pieces = (
        self.rd.pack(),
        self.esi.pack(),
        self.etag.pack(),
        chr_(self.iplen),
        self.ip.pack(),
        self.gwip.pack(),
        self.label.pack(),
    )
    self._packed = concat_strs(*pieces)
    return self._packed
def __init__(self, origin, packed=None):
    """Store the origin code and pre-compute its attribute encoding."""
    self.origin = origin
    if packed:
        self._packed = self._attribute(packed)
    else:
        self._packed = self._attribute(chr_(origin))
def __init__(self, mac=None, packed=None):
    """Keep the textual MAC and its 6-byte wire form (parsed from 'aa:bb:...' when needed)."""
    self.mac = mac
    if packed:
        self._packed = packed
    else:
        self._packed = b''.join(chr_(int(byte, 16)) for byte in mac.split(":"))
def pack(self):
    """Encode the operator byte (operations plus value-length bits) and the value."""
    size, encoded_value = self.encode(self.value)
    operator_byte = self.operations | _len_to_bit(size)
    return concat_strs(chr_(operator_byte), encoded_value)
def encode(self, value):
    """Return (length, encoding) for a single-byte value."""
    encoded = chr_(value)
    return 1, encoded
def index(self, negotiated=None):
    """Return a unique lookup key for this prefix (family, path-info, mask, network)."""
    if self.path_info is PathInfo.NOPATH:
        addpath = 'no-pi'
    else:
        addpath = self.path_info.pack()
    return NLRI._index(self) + addpath + chr_(self.cidr.mask) + self.cidr.pack_ip()
def resetFlags(char):
    """Clear the TRANSITIVE and OPTIONAL bits of a one-byte flag field."""
    cleared = ord_(char) & ~(Attribute.Flag.TRANSITIVE | Attribute.Flag.OPTIONAL)
    return chr_(cleared)
class Reactor(object):
    """Main event loop: owns the peers, the listener, the helper processes
    and the daemon plumbing, and drives them from run()."""

    # ANSI 'clear screen' escape sequence
    # [hex(ord(c)) for c in os.popen('clear').read()]
    clear = b''.join([
        chr_(int(c, 16))
        for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']
    ])

    def __init__(self, configurations):
        # network / daemon settings come from the environment singleton
        self.ip = environment.settings().tcp.bind
        self.port = environment.settings().tcp.port
        self.respawn = environment.settings().api.respawn
        self.max_loop_time = environment.settings().reactor.speed
        self.early_drop = environment.settings().daemon.drop

        self.logger = Logger()
        self.daemon = Daemon(self)
        self.processes = None   # created in run(), after daemonising
        self.listener = None
        self.configuration = Configuration(configurations)
        self.api = API(self)

        self.peers = {}          # neighbor key -> Peer
        self.route_update = False

        self._stopping = environment.settings().tcp.once
        self._shutdown = False
        self._reload = False
        self._reload_processes = False
        self._restart = False
        self._saved_pid = False
        self._pending = deque()  # queued (callback, name) API work
        self._running = None     # currently running API callback generator

        # signals only set flags; the main loop acts on them
        signal.signal(signal.SIGTERM, self.sigterm)
        signal.signal(signal.SIGHUP, self.sighup)
        signal.signal(signal.SIGALRM, self.sigalrm)
        signal.signal(signal.SIGUSR1, self.sigusr1)
        signal.signal(signal.SIGUSR2, self.sigusr2)

    def sigterm(self, signum, frame):
        self.logger.reactor('SIG TERM received - shutdown')
        self._shutdown = True

    def sighup(self, signum, frame):
        self.logger.reactor('SIG HUP received - shutdown')
        self._shutdown = True

    def sigalrm(self, signum, frame):
        self.logger.reactor('SIG ALRM received - restart')
        self._restart = True

    def sigusr1(self, signum, frame):
        self.logger.reactor('SIG USR1 received - reload configuration')
        self._reload = True

    def sigusr2(self, signum, frame):
        self.logger.reactor('SIG USR2 received - reload configuration and processes')
        self._reload = True
        self._reload_processes = True

    def ready(self, sockets, ios, sleeptime=0):
        """Wait up to *sleeptime* seconds and return the readable descriptors."""
        # never sleep a negative number of second (if the rounding is negative somewhere)
        # never sleep more than one second (should the clock time change during two time.time calls)
        sleeptime = min(max(0.0, sleeptime), 1.0)
        if not ios:
            time.sleep(sleeptime)
            return []
        try:
            read, _, _ = select.select(sockets + ios, [], [], sleeptime)
            return read
        except select.error as exc:
            errno, message = exc.args  # pylint: disable=W0633
            if errno not in error.block:
                raise exc
            return []
        except socket.error as exc:
            if exc.errno in error.fatal:
                raise exc
            return []

    def run(self):
        """Daemonise, bind the listener(s), drop privileges and loop forever."""
        self.daemon.daemonise()

        # Make sure we create processes once we have closed file descriptor
        # unfortunately, this must be done before reading the configuration file
        # so we can not do it with dropped privileges
        self.processes = Processes(self)

        # we have to read the configuration possibly with root privileges
        # as we need the MD5 information when we bind, and root is needed
        # to bind to a port < 1024
        # this is undesirable as :
        # - handling user generated data as root should be avoided
        # - we may not be able to reload the configuration once the privileges are dropped
        # but I can not see any way to avoid it
        if not self.load():
            return False

        try:
            self.listener = Listener()

            if self.ip:
                self.listener.listen(IP.create(self.ip), IP.create('0.0.0.0'), self.port, None, False, None)
                self.logger.reactor('Listening for BGP session(s) on %s:%d' % (self.ip, self.port))

            for neighbor in self.configuration.neighbors.values():
                if neighbor.listen:
                    self.listener.listen(neighbor.md5_ip, neighbor.peer_address, neighbor.listen, neighbor.md5_password, neighbor.md5_base64, neighbor.ttl_in)
                    self.logger.reactor('Listening for BGP session(s) on %s:%d%s' % (neighbor.md5_ip, neighbor.listen, ' with MD5' if neighbor.md5_password else ''))
        except NetworkError as exc:
            self.listener = None
            if os.geteuid() != 0 and self.port <= 1024:
                self.logger.reactor('Can not bind to %s:%d, you may need to run ExaBGP as root' % (self.ip, self.port), 'critical')
            else:
                self.logger.reactor('Can not bind to %s:%d (%s)' % (self.ip, self.port, str(exc)), 'critical')
            self.logger.reactor('unset exabgp.tcp.bind if you do not want listen for incoming connections', 'critical')
            self.logger.reactor('and check that no other daemon is already binding to port %d' % self.port, 'critical')
            sys.exit(1)

        if not self.early_drop:
            self.processes.start()

        if not self.daemon.drop_privileges():
            self.logger.reactor('Could not drop privileges to \'%s\' refusing to run as root' % self.daemon.user, 'critical')
            self.logger.reactor('Set the environmemnt value exabgp.daemon.user to change the unprivileged user', 'critical')
            return

        if self.early_drop:
            self.processes.start()

        # This is required to make sure we can write in the log location as we now have dropped root privileges
        if not self.logger.restart():
            self.logger.reactor('Could not setup the logger, aborting', 'critical')
            return

        if not self.daemon.savepid():
            return

        # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
        reload_completed = True

        wait = environment.settings().tcp.delay
        if wait:
            sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
            self.logger.reactor('waiting for %d seconds before connecting' % sleeptime)
            time.sleep(float(sleeptime))

        workers = {}   # socket -> peer key, for peers parked until their IO is ready
        peers = set()  # peer keys with work to do this cycle
        scheduled = False

        while True:
            try:
                finished = False
                start = time.time()
                end = start + self.max_loop_time

                if self._shutdown:
                    self._shutdown = False
                    self.shutdown()
                    break

                if self._reload and reload_completed:
                    self._reload = False
                    self.load()
                    self.processes.start(self._reload_processes)
                    self._reload_processes = False
                elif self._restart:
                    self._restart = False
                    self.restart()

                # We got some API routes to announce
                if self.route_update:
                    self.route_update = False
                    self.route_send()

                for peer in self.peers.keys():
                    peers.add(peer)

                while start < time.time() < end and not finished:
                    if self.peers:
                        for key in list(peers):
                            peer = self.peers[key]
                            action = peer.run()

                            # .run() returns an ACTION enum:
                            # * immediate if it wants to be called again
                            # * later if it should be called again but has no work atm
                            # * close if it is finished and is closing down, or restarting
                            if action == ACTION.CLOSE:
                                self.unschedule(key)
                                peers.discard(key)
                            # we are loosing this peer, not point to schedule more process work
                            elif action == ACTION.LATER:
                                for io in peer.sockets():
                                    workers[io] = key
                                # no need to come back to it before a a full cycle
                                peers.discard(key)

                    if not peers:
                        reload_completed = True

                    if self.listener:
                        for connection in self.listener.connected():
                            # found
                            # * False, not peer found for this TCP connection
                            # * True, peer found
                            # * None, conflict found for this TCP connections
                            found = False
                            for key in self.peers:
                                peer = self.peers[key]
                                neighbor = peer.neighbor
                                # XXX: FIXME: Inet can only be compared to Inet
                                if connection.local == str(neighbor.peer_address) and (neighbor.auto_discovery or connection.peer == str(neighbor.local_address)):
                                    if peer.incoming(connection):
                                        found = True
                                        break
                                    found = None
                                    break

                            if found:
                                self.logger.reactor('accepted connection from %s - %s' % (connection.local, connection.peer))
                            elif found is False:
                                self.logger.reactor('no session configured for %s - %s' % (connection.local, connection.peer))
                                connection.notification(6, 3, 'no session configured for the peer')
                                connection.close()
                            elif found is None:
                                self.logger.reactor('connection refused (already connected to the peer) %s - %s' % (connection.local, connection.peer))
                                connection.notification(6, 5, 'could not accept the connection')
                                connection.close()

                    scheduled = self.schedule()
                    finished = not peers and not scheduled

                # RFC state that we MUST not send more than one KEEPALIVE / sec
                # And doing less could cause the session to drop
                if finished:
                    for io in self.ready(list(peers), self.processes.fds(), end - time.time()):
                        if io in workers:
                            peers.add(workers[io])
                            del workers[io]

                if self._stopping and not self.peers.keys():
                    break

            except KeyboardInterrupt:
                while True:
                    try:
                        self._shutdown = True
                        self.logger.reactor('^C received')
                        break
                    except KeyboardInterrupt:
                        pass
            # socket.error is a subclass of IOError (so catch it first)
            except socket.error:
                try:
                    self._shutdown = True
                    self.logger.reactor('socket error received', 'warning')
                    break
                except KeyboardInterrupt:
                    pass
            except IOError:
                while True:
                    try:
                        self._shutdown = True
                        self.logger.reactor('I/O Error received, most likely ^C during IO', 'warning')
                        break
                    except KeyboardInterrupt:
                        pass
            except SystemExit:
                try:
                    self._shutdown = True
                    self.logger.reactor('exiting')
                    break
                except KeyboardInterrupt:
                    pass
            except ProcessError:
                try:
                    self._shutdown = True
                    self.logger.reactor('Problem when sending message(s) to helper program, stopping', 'error')
                except KeyboardInterrupt:
                    pass
            except select.error:
                try:
                    self._shutdown = True
                    self.logger.reactor('problem using select, stopping', 'error')
                except KeyboardInterrupt:
                    pass
            # from exabgp.leak import objgraph
            # print objgraph.show_most_common_types(limit=20)
            # import random
            # obj = objgraph.by_type('Route')[random.randint(0,2000)]
            # objgraph.show_backrefs([obj], max_depth=10)

    def shutdown(self):
        """terminate all the current BGP connections"""
        self.logger.reactor('performing shutdown')
        if self.listener:
            self.listener.stop()
            self.listener = None
        for key in self.peers.keys():
            self.peers[key].stop()
        self.processes.terminate()
        self.daemon.removepid()
        self._stopping = True

    def load(self):
        """reload the configuration and send to the peer the route which changed"""
        self.logger.reactor('performing reload of exabgp %s' % version)

        reloaded = self.configuration.reload()

        if not reloaded:
            #
            # Careful the string below is used but the QA code to check for sucess of failure
            self.logger.configuration('problem with the configuration file, no change done', 'error')
            # Careful the string above is used but the QA code to check for sucess of failure
            # self.logger.configuration(str(self.configuration.error), 'error')
            return False

        # drop peers no longer in the configuration
        for key, peer in self.peers.items():
            if key not in self.configuration.neighbors:
                self.logger.reactor('removing peer: %s' % peer.neighbor.name())
                peer.stop()

        for key, neighbor in self.configuration.neighbors.items():
            # new peer
            if key not in self.peers:
                self.logger.reactor('new peer: %s' % neighbor.name())
                peer = Peer(neighbor, self)
                self.peers[key] = peer
            # modified peer
            elif self.peers[key].neighbor != neighbor:
                self.logger.reactor('peer definition change, establishing a new connection for %s' % str(key))
                self.peers[key].reestablish(neighbor)
            # same peer but perhaps not the routes
            else:
                # finding what route changed and sending the delta is not obvious
                self.logger.reactor('peer definition identical, updating peer routes if required for %s' % str(key))
                self.peers[key].reconfigure(neighbor)
        self.logger.configuration('loaded new configuration successfully', 'info')

        return True

    def schedule(self):
        """Run one step of queued API work; return True while work remains."""
        try:
            # read at least on message per process if there is some and parse it
            for service, command in self.processes.received():
                self.api.text(self, service, command)

            # if we have nothing to do, return or save the work
            if not self._running:
                if not self._pending:
                    return False
                self._running, name = self._pending.popleft()
                self.logger.reactor('callback | installing %s' % name)

            if self._running:
                # run it
                try:
                    self.logger.reactor('callback | running')
                    six.next(self._running)  # run
                    # should raise StopIteration in most case
                    # and prevent us to have to run twice to run one command
                    six.next(self._running)  # run
                except StopIteration:
                    self._running = None
                    self.logger.reactor('callback | removing')
                return True

        except StopIteration:
            pass
        except KeyboardInterrupt:
            self._shutdown = True
            self.logger.reactor('^C received', 'error')

    def route_send(self):
        """the process ran and we need to figure what routes to changes"""
        self.logger.reactor('performing dynamic route update')
        for key in self.configuration.neighbors.keys():
            self.peers[key].send_new()
        self.logger.reactor('updated peers dynamic routes successfully')

    def restart(self):
        """kill the BGP session and restart it"""
        self.logger.reactor('performing restart of exabgp %s' % version)
        self.configuration.reload()

        for key in self.peers.keys():
            if key not in self.configuration.neighbors.keys():
                # NOTE(review): this looks up configuration.neighbors[key] for a
                # key just shown NOT to be present - likely should read self.peers;
                # confirm before relying on this path
                neighbor = self.configuration.neighbors[key]
                self.logger.reactor('removing Peer %s' % neighbor.name())
                self.peers[key].stop()
            else:
                self.peers[key].reestablish()
        self.processes.terminate()
        self.processes.start()

    def unschedule(self, peer):
        """Forget a peer entirely."""
        if peer in self.peers:
            del self.peers[peer]

    def answer(self, service, string):
        """Send a reply line back to an API helper process."""
        self.processes.write(service, string)
        self.logger.reactor('responding to %s : %s' % (service, string.replace('\n', '\\n')))

    def api_shutdown(self):
        # flags checked by the main loop; pending API work is discarded
        self._shutdown = True
        self._pending = deque()
        self._running = None

    def api_reload(self):
        self._reload = True
        self._pending = deque()
        self._running = None

    def api_restart(self):
        self._restart = True
        self._pending = deque()
        self._running = None

    @staticmethod
    def match_neighbor(description, name):
        """True when every word of *description* appears as a token in *name*."""
        for string in description:
            if re.search(r'(^|[\s])%s($|[\s,])' % re.escape(string), name) is None:
                return False
        return True

    def match_neighbors(self, descriptions):
        """return the sublist of peers matching the description passed, or None if no description is given"""
        if not descriptions:
            return self.peers.keys()
        returned = []
        for key in self.peers:
            for description in descriptions:
                if Reactor.match_neighbor(description, key):
                    if key not in returned:
                        returned.append(key)
        return returned

    def nexthops(self, peers):
        """Map each peer key to its configured local address."""
        return dict((peer, self.peers[peer].neighbor.local_address) for peer in peers)

    def plan(self, callback, name):
        """Queue an API callback generator for schedule() to run."""
        self._pending.append((callback, name))
def message(self, negotiated=None):
    """Encode the notification body: code, subcode, then the diagnostic data."""
    body = concat_strs(chr_(self.code), chr_(self.subcode), self.data)
    return self._message(body)
def __init__(self, origin, packed=None):
    """Keep the origin code and pre-compute its packed attribute form."""
    self.origin = origin
    raw = packed if packed else chr_(origin)
    self._packed = self._attribute(raw)
def _from_json(self, json_string):
    """Decode one exabgp JSON message and re-encode it through self.encoder.

    Exits the process (sys.exit(1)) on malformed or out-of-sequence input.
    """
    try:
        parsed = json.loads(json_string)
    except ValueError:
        print('invalid JSON message', file=sys.stderr)
        sys.exit(1)

    if parsed.get('exabgp', '0.0.0') != json_version:
        print('invalid json version', json_string, file=sys.stderr)
        sys.exit(1)

    content = parsed.get('type', '')

    if not content:
        print('invalid json content', json_string, file=sys.stderr)
        sys.exit(1)

    neighbor = _FakeNeighbor(
        parsed['neighbor']['address']['local'],
        parsed['neighbor']['address']['peer'],
        parsed['neighbor']['asn']['local'],
        parsed['neighbor']['asn']['peer'],
    )

    if content == 'state':
        self._state()
        return json_string

    direction = parsed['neighbor']['direction']
    category = parsed['neighbor']['message']['category']
    header = parsed['neighbor']['message']['header']
    body = parsed['neighbor']['message']['body']
    # body is a hex string; rebuild the raw message bytes two chars at a time
    raw = b''.join(chr_(int(body[_:_+2], 16)) for _ in range(0, len(body), 2))

    if content == 'open':
        message = Open.unpack_message(raw)
        self._open(direction, message)
        return self.encoder.open(neighbor, direction, message, header, body)

    # NOTE(review): 'keapalive' looks like a typo for 'keepalive' - if the
    # producer emits 'keepalive' this branch never matches; confirm
    if content == 'keapalive':
        return self.encoder.keepalive(neighbor, direction, header, body)

    if content == 'notification':
        message = Notification.unpack_message(raw)

        # draft-ietf-idr-shutdown processing applies only to code 6 subcode 2
        if (message.code, message.subcode) != (6, 2):
            # NOTE(review): 'data' is not assigned anywhere in this function
            # before use - this line raises NameError as written; it likely
            # should read from message.data. Confirm against upstream.
            message.data = data if not len([_ for _ in data if _ not in string.printable]) else hexstring(data)
            return self.encoder.notification(neighbor, direction, message, header, body)

        if len(data) == 0:
            # shutdown without shutdown communication (the old fashioned way)
            message.data = ''
            return self.encoder.notification(neighbor, direction, message, header, body)

        # draft-ietf-idr-shutdown or the peer was using 6,2 with data
        shutdown_length = ord(data[0])
        data = data[1:]

        if shutdown_length == 0:
            message.data = "empty Shutdown Communication."
            # move offset past length field
            return self.encoder.notification(neighbor, direction, message, header, body)

        if len(data) < shutdown_length:
            message.data = "invalid Shutdown Communication (buffer underrun) length : %i [%s]" % (shutdown_length, hexstring(data))
            return self.encoder.notification(neighbor, direction, message, header, body)

        if shutdown_length > 128:
            message.data = "invalid Shutdown Communication (too large) length : %i [%s]" % (shutdown_length, hexstring(data))
            return self.encoder.notification(neighbor, direction, message, header, body)

        try:
            message.data = 'Shutdown Communication: "%s"' % \
                data[:shutdown_length].decode('utf-8').replace('\r', ' ').replace('\n', ' ')
        except UnicodeDecodeError:
            message.data = "invalid Shutdown Communication (invalid UTF-8) length : %i [%s]" % (shutdown_length, hexstring(data))
            return self.encoder.notification(neighbor, direction, message, header, body)

        trailer = data[shutdown_length:]
        if trailer:
            message.data += ", trailing data: " + hexstring(trailer)

        return self.encoder.notification(neighbor, direction, message, header, body)

    if not self.negotiated:
        print('invalid message sequence, open not exchange not complete', json_string, file=sys.stderr)
        sys.exit(1)

    message = Message.unpack(category, raw, self.negotiated)

    if content == 'update':
        return self.encoder.update(neighbor, direction, message, header, body)

    if content == 'eor':  # XXX: Should not be required
        return self.encoder.update(neighbor, direction, message, header, body)

    if content == 'refresh':
        return self.json.refresh(neighbor, direction, message, header, body)

    if content == 'operational':
        return self.json.refresh(neighbor, direction, message, header, body)

    raise RuntimeError('the programer is a monkey and forgot a JSON message type')
class Message(Exception):
    """Base class for all BGP messages.

    Doubles as an Exception (``__init__`` calls ``Exception.__init__``) and
    acts as a registry: concrete message classes register themselves per
    TYPE byte via the ``register`` decorator and are looked up by
    ``klass``/``unpack``.
    """

    # we need to define TYPE inside __init__ of the subclasses
    # otherwise we can not dynamically create different UnknownMessage
    # TYPE = None

    # 16 bytes of 0xFF preceding every BGP message (wire-format marker)
    MARKER = chr_(0xff) * 16
    HEADER_LEN = 19
    MAX_LEN = 4096

    # TYPE byte (as int) -> registered message class
    registered_message = {}
    # This is redefined by the Notify class, Exception is never used
    klass_notify = Exception
    klass_unknown = Exception

    class CODE(object):
        # BGP message type codes wrapped as _MessageCode instances
        NOP = _MessageCode(_MessageCode.NOP)
        OPEN = _MessageCode(_MessageCode.OPEN)
        UPDATE = _MessageCode(_MessageCode.UPDATE)
        NOTIFICATION = _MessageCode(_MessageCode.NOTIFICATION)
        KEEPALIVE = _MessageCode(_MessageCode.KEEPALIVE)
        ROUTE_REFRESH = _MessageCode(_MessageCode.ROUTE_REFRESH)
        OPERATIONAL = _MessageCode(_MessageCode.OPERATIONAL)

        MESSAGES = [
            NOP,
            OPEN,
            UPDATE,
            NOTIFICATION,
            KEEPALIVE,
            ROUTE_REFRESH,
            OPERATIONAL
        ]

        @staticmethod
        def name(message_id):
            # human-readable name for a type code, with a safe fallback
            return _MessageCode.names.get(
                message_id,
                'unknown message %s' % hex(message_id))

        @staticmethod
        def short(message_id):
            # short name for a type code, with a safe fallback
            return _MessageCode.short_names.get(
                message_id,
                'unknown message %s' % hex(message_id))

        # # Can raise KeyError
        # @staticmethod
        # def code (short):
        #     return _MessageCode.names.get[short]

        def __init__(self):
            # CODE is a pure namespace of constants
            raise RuntimeError('This class can not be instantiated')

    # per-type sanity check on the advertised message length
    Length = {
        CODE.OPEN: lambda _: _ >= 29,  # noqa
        CODE.UPDATE: lambda _: _ >= 23,  # noqa
        CODE.NOTIFICATION: lambda _: _ >= 21,  # noqa
        CODE.KEEPALIVE: lambda _: _ == 19,  # noqa
        CODE.ROUTE_REFRESH: lambda _: _ == 23,  # noqa
    }

    def __init__(self):
        Exception.__init__(self)

    @staticmethod
    def string(code):
        # long descriptive name for a type code
        return _MessageCode.long_names.get(code, 'unknown')

    def _message(self, message):
        # prepend marker, 2-byte total length and the TYPE byte to the payload
        message_len = pack('!H', 19 + len(message))
        return concat_strs(self.MARKER, message_len, self.TYPE, message)

    def message(self, negotiated=None):
        raise NotImplementedError('message not implemented in subclasses')

    @classmethod
    def register(cls, klass):
        """Class decorator: record *klass* as the handler for its TYPE byte."""
        if klass.TYPE in cls.registered_message:
            raise RuntimeError('only one class can be registered per message')
        cls.registered_message[ord(klass.TYPE)] = klass
        return klass

    @classmethod
    def notify(cls, klass):
        # class decorator used to replace the Exception placeholder above
        cls.klass_notify = klass
        return klass

    @classmethod
    def klass(cls, what):
        """Return the registered class for type *what*, or raise a notify error."""
        if what in cls.registered_message:
            return cls.registered_message[what]
        raise cls.klass_notify(2, 4, 'can not handle message %s' % what)

    @classmethod
    def unpack(cls, message, data, negotiated):
        """Decode *data* with the class registered for *message*, else klass_unknown."""
        if message in cls.registered_message:
            return cls.klass(message).unpack_message(data, negotiated)
        return cls.klass_unknown(message, data, negotiated)

    @classmethod
    def code(cls, name):
        # map a textual name (long or short form) back to its CODE constant
        for message in cls.CODE.MESSAGES:
            if name == str(message) or name == message.short():
                return message
        return cls.CODE.NOP
def from_rt(cls, route_target):
    """Build this community from *route_target*, swapping in our subtype byte."""
    wire = route_target.pack()
    subtype = chr_(cls.COMMUNITY_SUBTYPE)
    # keep the leading type byte, replace the subtype, keep the value bytes
    return cls.unpack(concat_strs(wire[0:1], subtype, wire[2:]))
def pack(self):
    """Wire-encode: one route-type byte followed by the packed NLRI."""
    # ID is defined in subclasses  # pylint: disable=E1101
    nlri = self.cidr.pack_nlri()
    return concat_strs(chr_(self.ID), nlri)
def pack(self, negotiated=None):
    """Wire-encode this labelled NLRI, with an add-path prefix when negotiated."""
    prefix = b''
    if negotiated and negotiated.addpath.send(self.afi, self.safi):
        prefix = self.path_info.pack()
    # the mask byte covers the label stack (8 bits per label) plus the CIDR mask
    full_mask = chr_(len(self.labels) * 8 + self.cidr.mask)
    return prefix + full_mask + self.labels.pack() + self.cidr.pack_ip()
def __init__(self, mac=None, packed=None):
    """Hold a MAC address as text plus its packed byte form.

    When *packed* is not supplied it is derived from the colon-separated
    hex string in *mac*.
    """
    self.mac = mac
    if packed:
        self._packed = packed
    else:
        # one octet per colon-separated hex field
        self._packed = b''.join(chr_(int(octet, 16)) for octet in mac.split(":"))
def encode(self, value):
    """Encode *value*: return (octet count, packed bytes).

    Values below 256 take one byte; anything else two bytes, big-endian.
    """
    single_octet_limit = 1 << 8
    if value < single_octet_limit:
        return 1, chr_(value)
    return 2, pack('!H', value)
def __hash__(self):
    """Hash over the mask byte concatenated with the packed prefix bytes."""
    key = chr_(self.mask) + self._packed
    return hash(key)
def pack(self):
    """Wire-encode this value as its single byte (self behaves as an int)."""
    encoded = chr_(self)
    return encoded
def pack_nlri(self):
    """Encode the mask byte followed by only the significant prefix octets."""
    significant = CIDR.size(self.mask)
    return chr_(self.mask) + self._packed[:significant]
def pack(self):
    """Wire-encode: type byte, mask, offset, then the packed IP."""
    # ID is defined in subclasses  # pylint: disable=E1101
    parts = (
        chr_(self.ID),
        chr_(self.cidr.mask),
        chr_(self.offset),
        self.cidr.pack_ip(),
    )
    return concat_strs(*parts)
def pack(self):
    # single-byte wire representation of this int-like value
    return chr_(self)
def resetFlags(char):
    """Return *char* with the TRANSITIVE and OPTIONAL flag bits cleared."""
    keep_mask = ~(Attribute.Flag.TRANSITIVE | Attribute.Flag.OPTIONAL)
    return chr_(ord_(char) & keep_mask)
def from_rt(cls, route_target):
    """Derive this community from *route_target* by rewriting the subtype byte."""
    raw = route_target.pack()
    # byte 0: type (kept); byte 1: subtype (replaced); bytes 2+: value (kept)
    rebuilt = concat_strs(raw[0:1], chr_(cls.COMMUNITY_SUBTYPE), raw[2:])
    return cls.unpack(rebuilt)
def test101_EVPNHashEqual_somefieldsvary(self):
    '''
    Two EVPN MAC NLRIs differing by their ESI or label or RD, or nexthop,
    but otherwise identical should hash to the same value, and be equal
    '''
    def build(esi, labels, ip, nexthop=None):
        # fresh sub-objects on every call so no NLRI shares state
        tail = [IP.pton(nexthop)] if nexthop is not None else []
        return EVPNMAC(
            RouteDistinguisher.fromElements("42.42.42.42", 5),
            esi,
            EthernetTag(111),
            MAC("01:02:03:04:05:06"),
            6 * 8,
            labels,
            IP.create(ip),
            *tail
        )

    nlri0 = build(ESI(), Labels([42], True), "1.1.1.1")
    # ESI differs
    nlri1 = build(ESI(b''.join(chr_(1) for _ in range(0, 10))), Labels([42], True), "1.1.1.1")
    # label differs
    nlri2 = build(ESI(), Labels([4444], True), "1.1.1.1")
    # IP: different IPs, but same MACs: different route
    nlri3 = build(ESI(), Labels([42], True), "2.2.2.2")
    # with a next hop...
    nlri4 = build(ESI(), Labels([42], True), "1.1.1.1", "10.10.10.10")
    nlri5 = build(ESI(), Labels([42], True), "1.1.1.1", "11.11.11.11")

    self.assertEqual(hash(nlri0), hash(nlri1))
    self.assertEqual(hash(nlri0), hash(nlri2))
    self.assertEqual(hash(nlri0), hash(nlri4))
    self.assertEqual(nlri0, nlri1)
    self.assertEqual(nlri0, nlri2)
    self.assertEqual(nlri0, nlri4)
    self.assertEqual(nlri1, nlri2)
    self.assertEqual(nlri1, nlri4)
    self.assertEqual(nlri2, nlri4)
    self.assertEqual(nlri4, nlri5)

    self.assertNotEqual(hash(nlri0), hash(nlri3))
    self.assertNotEqual(nlri0, nlri3)
    self.assertNotEqual(nlri1, nlri3)
    self.assertNotEqual(nlri2, nlri3)
    self.assertNotEqual(nlri3, nlri4)
def message(self, negotiated=None):
    """Build the full NOTIFICATION wire message: code, subcode, then data."""
    payload = concat_strs(chr_(self.code), chr_(self.subcode), self.data)
    return self._message(payload)