Example #1
    def connect(self):
        # allows testing the protocol code using a modified StringIO with an extra 'pending' function
        if not self.connection:
            peer = self.neighbor.peer_address
            local = self.neighbor.local_address
            md5 = self.neighbor.md5
            ttl = self.neighbor.ttl
            self.connection = Connection(peer, local, md5, ttl)

            message = 'neighbor %s connected\n' % self.peer.neighbor.peer_address
            try:
                proc = self.peer.supervisor.processes
                for name in proc.notify(self.neighbor.peer_address):
                    proc.write(name, message)
            except ProcessError:
                raise Failure(
                    'Could not send message(s) to helper program(s) : %s' %
                    message)
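
The comment in connect() hints at how this code can be unit-tested: the Protocol only ever calls pending(), read(), write() and close() on self.connection and looks at last_read/last_write, so a StringIO wrapper is enough to stand in for a TCP session. A minimal sketch of such a fake, assuming the Python 2 string-as-bytes semantics these examples rely on (the FakeConnection name and its fields are illustrative, not part of ExaBGP):

import time
from StringIO import StringIO  # Python 2, as used by these examples

class FakeConnection(object):
    # Wraps a StringIO of raw wire bytes and adds the extra 'pending'
    # call that Protocol.read_message() expects on self.connection.
    def __init__(self, data=''):
        self.io = StringIO(data)
        self.last_read = time.time()
        self.last_write = time.time()
        self.sent = []

    def pending(self, reset=False):
        # True while unread bytes remain in the buffer
        return self.io.tell() < len(self.io.getvalue())

    def read(self, number):
        self.last_read = time.time()
        return self.io.read(number)

    def write(self, data):
        # record what the protocol tried to send instead of using a socket
        self.sent.append(data)
        self.last_write = time.time()
        return True

    def close(self):
        pass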
Example #2
	def connect (self):
		# allows testing the protocol code using a modified StringIO with an extra 'pending' function
		if not self.connection:
			peer = self.neighbor.peer_address
			local = self.neighbor.local_address
			md5 = self.neighbor.md5
			ttl = self.neighbor.ttl
			self.connection = Connection(peer,local,md5,ttl)

			message = 'neighbor %s connected\n' % self.peer.neighbor.peer_address
			try:
				proc = self.peer.supervisor.processes
				for name in proc.notify(self.neighbor.peer_address):
					proc.write(name,message)
			except ProcessError:
				raise Failure('Could not send message(s) to helper program(s) : %s' % message)
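
This copy of connect() (and the matching close() shown in the next example) only needs two calls from self.peer.supervisor.processes: notify() to list the helper programs interested in a neighbor, and write() to hand each of them one line of text. A hypothetical stub is enough to exercise the ProcessError branch; the names below are made up for the sketch and are not ExaBGP classes:

class ProcessError(Exception):
    # stand-in for the exception the real process manager raises
    pass

class StubProcesses(object):
    def __init__(self, fail=False):
        self.fail = fail
        self.received = []

    def notify(self, peer_address):
        # names of the helper programs watching this neighbor
        return ['helper-1']

    def write(self, name, message):
        if self.fail:
            raise ProcessError('helper %s is gone' % name)
        self.received.append((name, message))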
Example #3
class Protocol(object):
    decode = True
    strict = False

    def __init__(self, peer, connection=None):
        self.peer = peer
        self.neighbor = peer.neighbor
        self.connection = connection
        self._delta = Delta(Table(peer))
        self._asn4 = False
        self._messages = {}
        self._frozen = 0
        self.message_size = 4096

    # XXX: we use self.peer.neighbor.peer_address when we could use self.neighbor.peer_address

    def me(self, message):
        return "Peer %15s ASN %-7s %s" % (self.peer.neighbor.peer_address,
                                          self.peer.neighbor.peer_as, message)

    def connect(self):
        # allows testing the protocol code using a modified StringIO with an extra 'pending' function
        if not self.connection:
            peer = self.neighbor.peer_address
            local = self.neighbor.local_address
            md5 = self.neighbor.md5
            ttl = self.neighbor.ttl
            self.connection = Connection(peer, local, md5, ttl)

            message = 'neighbor %s connected\n' % self.peer.neighbor.peer_address
            try:
                proc = self.peer.supervisor.processes
                for name in proc.notify(self.neighbor.peer_address):
                    proc.write(name, message)
            except ProcessError:
                raise Failure(
                    'Could not send message(s) to helper program(s) : %s' %
                    message)

    def check_keepalive(self):
        left = int(self.connection.last_read + self.neighbor.hold_time -
                   time.time())
        if left <= 0:
            raise Notify(4, 0)
        return left

    def close(self):
        #self._delta.last = 0
        if self.connection:
            # must be first, otherwise we could have a loop caused by the raise below
            self.connection.close()
            self.connection = None

            message = 'neighbor %s down\n' % self.peer.neighbor.peer_address
            try:
                proc = self.peer.supervisor.processes
                for name in proc.notify(self.neighbor.peer_address):
                    proc.write(name, message)
            except ProcessError:
                raise Failure(
                    'Could not send message(s) to helper program(s) : %s' %
                    message)

    # Read from network .......................................................

    def read_message(self):
        # This call resets the timer used for the timeout
        if not self.connection.pending(True):
            return NOP('')

        length = 19
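        # BGP header (RFC 4271): 16-byte marker + 2-byte length + 1-byte type = 19 bytes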
        data = ''
        while length:
            if self.connection.pending():
                delta = self.connection.read(length)
                data += delta
                length -= len(delta)
                # The socket is closed
                if not data:
                    raise Failure('The TCP connection is closed')

        if data[:16] != Message.MARKER:
            # We are speaking BGP - send us a valid Marker
            raise Notify(1, 1,
                         'The packet received does not contain a BGP marker')

        raw_length = data[16:18]
        length = unpack('!H', raw_length)[0]
        msg = data[18]

        if (length < 19 or length > 4096):
            # BAD Message Length
            raise Notify(1, 2)

        if ((msg == Open.TYPE and length < 29)
                or (msg == Update.TYPE and length < 23)
                or (msg == Notification.TYPE and length < 21)
                or (msg == KeepAlive.TYPE and length != 19)):
            # MUST send the faulty length back
            raise Notify(1, 2, raw_length)
            #(msg == RouteRefresh.TYPE and length != 23)

        length -= 19
        data = ''
        while length:
            if self.connection.pending():
                delta = self.connection.read(length)
                data += delta
                length -= len(delta)
                # The socket is closed
                if not data:
                    raise Failure('The TCP connection is closed')

        if msg == Notification.TYPE:
            raise Notification(ord(data[0]), ord(data[1]))

        if msg == KeepAlive.TYPE:
            return self.KeepAliveFactory(data)

        if msg == Open.TYPE:
            return self.OpenFactory(data)

        if msg == Update.TYPE:
            if self.neighbor.parse_routes:
                update = self.UpdateFactory(data)
                return update
            else:
                return NOP('')

        if self.strict:
            raise Notify(1, 3, msg)

        return NOP(data)

    def read_open(self, _open, ip):
        message = self.read_message()

        if message.TYPE == NOP.TYPE:
            return message

        if message.TYPE != Open.TYPE:
            raise Notify(
                5, 1, 'The first packet received is not an open message (%s)' %
                message)

        if _open.asn.asn4() and not message.capabilities.announced(
                Capabilities.FOUR_BYTES_ASN):
            raise Notify(2, 0, 'We have an ASN4 and you do not speak it. bye.')

        self._asn4 = message.capabilities.announced(
            Capabilities.FOUR_BYTES_ASN)

        if message.asn == AS_TRANS:
            peer_as = message.capabilities[Capabilities.FOUR_BYTES_ASN]
        else:
            peer_as = message.asn

        if peer_as != self.neighbor.peer_as:
            raise Notify(
                2, 2, 'ASN in OPEN (%d) did not match ASN expected (%d)' %
                (message.asn, self.neighbor.peer_as))

        # RFC 6286 : http://tools.ietf.org/html/rfc6286
        #if message.router_id == RouterID('0.0.0.0'):
        #	message.router_id = RouterID(ip)
        if message.router_id == RouterID('0.0.0.0'):
            raise Notify(
                2, 3, '0.0.0.0 is an invalid router_id according to RFC6286')
        if message.router_id == self.neighbor.router_id and message.asn == self.neighbor.local_as:
            raise Notify(
                2, 3,
                'BGP Identifier collision (%s) on IBGP according to RFC 6286'
                % message.router_id)

        if message.hold_time < 3:
            raise Notify(2, 6, 'Hold Time is invalid (%d)' % message.hold_time)
        if message.hold_time >= 3:
            self.neighbor.hold_time = min(self.neighbor.hold_time,
                                          message.hold_time)

        # XXX: Does not work as the capa is not yet defined
        if message.capabilities.announced(Capabilities.EXTENDED_MESSAGE):
            # untested !
            if self.peer.bgp.message_size:
                self.message_size = self.peer.bgp.message_size

# README: Limiting what we announce may cause issues if you add a new family and SIGHUP
# README: So it is commented out until I make up my mind whether to add it (as Juniper complains about mismatched capabilities)
#		# These are the capabilities we need to announce these routes
#		for family in _open.capabilities[Capabilities.MULTIPROTOCOL_EXTENSIONS]:
#			# if the peer does not support them, tear down the session
#			if family not in message.capabilities[Capabilities.MULTIPROTOCOL_EXTENSIONS]:
#				afi,safi = family
#				raise Notify(2,0,'Peers does not speak %s %s' % (afi,safi))

        return message

    def read_keepalive(self):
        message = self.read_message()
        if message.TYPE == NOP.TYPE:
            return message
        if message.TYPE != KeepAlive.TYPE:
            raise Notify(5, 2)
        return message

    # Sending message to peer .................................................

    # we do not buffer these messages on purpose

    def new_open(self, restarted, asn4):
        if asn4:
            asn = self.neighbor.local_as
        else:
            asn = AS_TRANS

        o = Open(4, asn, self.neighbor.router_id.ip,
                 Capabilities().default(self.neighbor, restarted),
                 self.neighbor.hold_time)

        if not self.connection.write(o.message()):
            raise Failure('Could not send open')
        return o

    def new_keepalive(self, force=False):
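        # 'left' is the number of seconds before the next KEEPALIVE is due,
        # counted from the last successful write (the keepalive interval is
        # conventionally one third of the negotiated hold time)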
        left = int(self.connection.last_write +
                   self.neighbor.hold_time.keepalive() - time.time())
        k = KeepAlive()
        m = k.message()
        if force:
            written = self.connection.write(k.message())
            if not written:
                logger.message(self.me(">> KEEPALIVE buffered"))
                self._messages[self.neighbor.peer_as].append(('KEEPALIVE', m))
            else:
                self._frozen = 0
            return left, k
        if left <= 0:
            written = self.connection.write(k.message())
            if not written:
                logger.message(self.me(">> KEEPALIVE buffered"))
                self._messages[self.neighbor.peer_as].append(('KEEPALIVE', m))
            else:
                self._frozen = 0
            return left, k
        return left, None

    def new_notification(self, notification):
        return self.connection.write(notification.message())

    # messages buffered in case of failure

    def buffered(self):
        return self._messages.get(self.neighbor.peer_as, []) != []

    def _backlog(self, maximum=0):
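        # Retry messages that could not be written earlier; give up on the
        # session if the peer has not read anything for a full hold time or
        # if more than MAX_BACKLOG messages pile up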
        backlog = self._messages.get(self.neighbor.peer_as, [])
        if backlog:
            if not self._frozen:
                self._frozen = time.time()
            if self._frozen and self._frozen + (
                    self.neighbor.hold_time) < time.time():
                raise Failure(
                    'peer %s not reading on socket - killing session' %
                    self.neighbor.peer_as)
            logger.message(
                self.me(
                    "unable to send route for %d second (maximum allowed %d)" %
                    (time.time() - self._frozen, self.neighbor.hold_time)))
            nb_backlog = len(backlog)
            if nb_backlog > MAX_BACKLOG:
                raise Failure(
                    'over %d routes buffered for peer %s - killing session' %
                    (MAX_BACKLOG, self.neighbor.peer_as))
            logger.message(
                self.me("backlog of %d/%d routes" % (nb_backlog, MAX_BACKLOG)))
        count = 0
        while backlog:
            count += 1
            name, update = backlog[0]
            written = self.connection.write(update)
            if not written:
                break
            logger.message(self.me(">> DEBUFFERED %s" % name))
            backlog.pop(0)
            self._frozen = 0
            yield count
            if maximum and count >= maximum:
                break
        self._messages[self.neighbor.peer_as] = backlog

    def _announce(self, name, generator):
        def chunked(generator, size):
            chunk = ''
            for data in generator:
                if len(data) > size:
                    raise Failure(
                        'Can not send BGP update larger than %d bytes on this connection.'
                        % size)
                if len(chunk) + len(data) <= size:
                    chunk += data
                    continue
                yield chunk
                chunk = data
            if chunk:
                yield chunk

        count = 0
        # The message size is the whole BGP message INCLUDING headers !
        for update in chunked(generator, self.message_size - 19):
            count += 1
            if self._messages[self.neighbor.peer_as]:
                logger.message(self.me(
                    ">> %s could not be sent, some messages are still in the buffer" % name))
                self._messages[self.neighbor.peer_as].append((name, update))
                continue
            written = self.connection.write(update)
            if not written:
                logger.message(self.me(">> %s buffered" % name))
                self._messages[self.neighbor.peer_as].append((name, update))
            yield count

    def new_announce(self):
        for answer in self._backlog():
            yield answer
        asn4 = not not self.peer.open.capabilities.announced(
            Capabilities.FOUR_BYTES_ASN)
        for answer in self._announce(
                'UPDATE',
                self._delta.announce(asn4, self.neighbor.local_as,
                                     self.neighbor.peer_as)):
            yield answer

    def new_update(self):
        for answer in self._backlog():
            yield answer
        asn4 = not not self.peer.open.capabilities.announced(
            Capabilities.FOUR_BYTES_ASN)
        for answer in self._announce(
                'UPDATE',
                self._delta.update(asn4, self.neighbor.local_as,
                                   self.neighbor.peer_as)):
            yield answer

    def new_eors(self, families):
        for answer in self._backlog():
            pass
        eor = EOR()
        eors = eor.eors(families)
        for answer in self._announce('EOR', eors):
            pass

    # Message Factory .................................................

    def KeepAliveFactory(self, data):
        return KeepAlive()

    def _key_values(self, name, data):
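        # OPEN optional parameters and capabilities share a simple TLV layout:
        # 1-byte type, 1-byte length, then 'length' bytes of value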
        if len(data) < 2:
            raise Notify(2, 0,
                         "Bad length for OPEN %s (<2) %s" % (name, hexa(data)))
        l = ord(data[1])
        boundary = l + 2
        if len(data) < boundary:
            raise Notify(
                2, 0, "Bad length for OPEN %s (buffer underrun) %s" %
                (name, hexa(data)))
        key = ord(data[0])
        value = data[2:boundary]
        rest = data[boundary:]
        return key, value, rest

    def CapabilitiesFactory(self, data):
        capabilities = Capabilities()
        option_len = ord(data[0])
        if option_len:
            data = data[1:]
            while data:
                key, value, data = self._key_values('parameter', data)
                # Parameters must only be sent once.
                if key == Parameter.AUTHENTIFICATION_INFORMATION:
                    raise Notify(2, 5)

                if key == Parameter.CAPABILITIES:
                    while value:
                        k, capv, value = self._key_values('capability', value)
                        # Multiple Capabilities can be present in a single attribute
                        #if r:
                        #	raise Notify(2,0,"Bad length for OPEN %s (size mismatch) %s" % ('capability',hexa(value)))

                        if k == Capabilities.MULTIPROTOCOL_EXTENSIONS:
                            if k not in capabilities:
                                capabilities[k] = MultiProtocol()
                            afi = AFI(unpack('!H', capv[:2])[0])
                            safi = SAFI(ord(capv[3]))
                            capabilities[k].append((afi, safi))
                            continue

                        if k == Capabilities.GRACEFUL_RESTART:
                            restart = unpack('!H', capv[:2])[0]
                            restart_flag = restart >> 12
                            restart_time = restart & Graceful.TIME_MASK
                            value_gr = capv[2:]
                            families = []
                            while value_gr:
                                afi = AFI(unpack('!H', value_gr[:2])[0])
                                safi = SAFI(ord(value_gr[2]))
                                flag_family = ord(value_gr[0])
                                families.append((afi, safi, flag_family))
                                value_gr = value_gr[4:]
                            capabilities[k] = Graceful(restart_flag,
                                                       restart_time, families)
                            continue

                        if k == Capabilities.FOUR_BYTES_ASN:
                            capabilities[k] = ASN(unpack('!L', capv[:4])[0])
                            continue

                        if k == Capabilities.ROUTE_REFRESH:
                            capabilities[k] = RouteRefresh()
                            continue

                        if k == Capabilities.CISCO_ROUTE_REFRESH:
                            capabilities[k] = CiscoRouteRefresh()
                            continue

                        if k == Capabilities.MULTISESSION_BGP:
                            capabilities[k] = MultiSession()
                            continue
                        if k == Capabilities.MULTISESSION_BGP_RFC:
                            capabilities[k] = MultiSession()
                            continue

                        if k not in capabilities:
                            capabilities[k] = Unknown(k,
                                                      [ord(_) for _ in capv])
                else:
                    raise Notify(2, 0, 'Unknown OPEN parameter %s' % hex(key))
        return capabilities

    def OpenFactory(self, data):
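        # OPEN body (RFC 4271): 1-byte version, 2-byte ASN, 2-byte hold time,
        # 4-byte BGP identifier, then the optional parameters block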
        version = ord(data[0])
        if version != 4:
            # Only version 4 is supported nowadays.
            raise Notify(2, 1, data[0])
        asn = unpack('!H', data[1:3])[0]
        hold_time = unpack('!H', data[3:5])[0]
        numeric = unpack('!L', data[5:9])[0]
        router_id = "%d.%d.%d.%d" % (numeric >> 24, (numeric >> 16) & 0xFF,
                                     (numeric >> 8) & 0xFF, numeric & 0xFF)
        capabilities = self.CapabilitiesFactory(data[9:])
        return Open(version, asn, router_id, capabilities, hold_time)

    def UpdateFactory(self, data):
        length = len(data)
        # withdrawn
        lw, withdrawn, data = defix(data)
        if len(withdrawn) != lw:
            raise Notify(3, 1)
        la, attribute, announced = defix(data)
        if len(attribute) != la:
            raise Notify(3, 1)
        # The RFC check ...
        #if lw + la + 23 > length:
        if 2 + lw + 2 + la + len(announced) != length:
            raise Notify(3, 1)

        routes = []
        while withdrawn:
            nlri = BGPPrefix(AFI.ipv4, withdrawn)
            route = ReceivedRoute(nlri, 'withdraw')
            withdrawn = withdrawn[len(nlri):]
            routes.append(route)

        self.mp_routes = []
        attributes = self.AttributesFactory(attribute)
        routes.extend(self.mp_routes)

        while announced:
            nlri = BGPPrefix(AFI.ipv4, announced)
            route = ReceivedRoute(nlri, 'announce')
            # XXX: Should this be a deep copy
            route.attributes = attributes
            announced = announced[len(nlri):]
            routes.append(route)
            #logger.info(self.me('Received route %s' % nlri))

        #print "routes", routes
        #print "attributes", attributes

        if routes:
            return Update(routes)
        return NOP('')

    def AttributesFactory(self, data):
        try:
            self.attributes = Attributes()
            return self._AttributesFactory(data).attributes
        except IndexError:
            raise Notify(3, 2, data)

    def __new_ASPath(self, data, asn4=False):

        if len(data) == 0:
            return ASPath(asn4)

        if asn4:
            size = 4
            decoder = 'L'  # could it be 'I' as well ?
        else:
            size = 2
            decoder = 'H'
        stype = ord(data[0])
        slen = ord(data[1])
        sdata = data[2:2 + (slen * size)]

        ASPS = ASPath(asn4, stype)
        format = '!' + (decoder * slen)
        for c in unpack(format, sdata):
            ASPS.add(c)
        return ASPS

    def __new_AS4Path(self, data):
        stype = ord(data[0])
        slen = ord(data[1])
        sdata = data[2:2 + (slen * 4)]

        ASPS = AS4Path(stype)
        format = '!' + ('L' * slen)
        for c in unpack(format, sdata):
            ASPS.add(c)
        return ASPS

    def __merge_attributes(self):
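        # RFC 6793 merge rule: if AS_PATH has fewer entries than AS4_PATH keep
        # AS4_PATH as-is, otherwise keep the leading AS_PATH entries and append
        # AS4_PATH to them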
        as2path = self.attributes[AttributeID.AS_PATH]
        as4path = self.attributes[AttributeID.AS4_PATH]
        newASPS = ASPath(True, as2path.asptype)
        len2 = len(as2path.aspsegment)
        len4 = len(as4path.aspsegment)

        if len2 < len4:
            for asn in as4path.aspsegment:
                newASPS.add(asn)
        else:
            for asn in as2path.aspsegment[:-len4]:
                newASPS.add(asn)
            for asn in as4path.aspsegment:
                newASPS.add(asn)

        self.attributes.remove(AttributeID.AS_PATH)
        self.attributes.remove(AttributeID.AS4_PATH)
        self.attributes.add(newASPS)

        #raise Notify(3,1,'could not merge AS4_PATH in AS_PATH')

    def __new_communities(self, data):
        communities = Communities()
        while data:
            community = unpack('!L', data[:4])[0]
            data = data[4:]
            if data and len(data) < 4:
                raise Notify(
                    3, 1, 'could not decode community %s' %
                    str([hex(ord(_)) for _ in data]))
            communities.add(Community(community))
        return communities

    def __new_extended_communities(self, data):
        communities = ECommunities()
        while data:
            community = data[:8]
            data = data[8:]
            if data and len(data) < 8:
                raise Notify(
                    3, 1, 'could not decode extended community %s' %
                    str([hex(ord(_)) for _ in data]))
            communities.add(ECommunity.unpackFrom(community))
        return communities

    def _AttributesFactory(self, data):
        if not data:
            return self

        # We do not care whether the attributes are transitive or not as we do not redistribute
        flag = Flag(ord(data[0]))
        code = AttributeID(ord(data[1]))

        if flag & Flag.EXTENDED_LENGTH:
            length = unpack('!H', data[2:4])[0]
            offset = 4
        else:
            length = ord(data[2])
            offset = 3

        data = data[offset:]

        #		if not length:
        #			return self._AttributesFactory(data[length:])

        # XXX: This code does not make sure that attributes are unique - or does it ?

        if code == AttributeID.ORIGIN:
            logger.parser('parsing origin')
            self.attributes.add(Origin(ord(data[0])))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.AS_PATH:
            logger.parser('parsing as_path')
            self.attributes.add(self.__new_ASPath(data[:length], self._asn4))
            if not self._asn4 and self.attributes.has(AttributeID.AS4_PATH):
                self.__merge_attributes()
            return self._AttributesFactory(data[length:])

        if code == AttributeID.AS4_PATH:
            logger.parser('parsing as_path')
            self.attributes.add(self.__new_AS4Path(data[:length]))
            if not self._asn4 and self.attributes.has(AttributeID.AS_PATH):
                self.__merge_attributes()
            return self._AttributesFactory(data[length:])

        if code == AttributeID.NEXT_HOP:
            logger.parser('parsing next-hop')
            self.attributes.add(NextHop(Inet(AFI.ipv4, data[:4])))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.MED:
            logger.parser('parsing med')
            self.attributes.add(MED(unpack('!L', data[:4])[0]))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.LOCAL_PREF:
            logger.parser('parsing local-preference')
            self.attributes.add(LocalPreference(unpack('!L', data[:4])[0]))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.ORIGINATOR_ID:
            logger.parser('parsing originator-id')
            self.attributes.add(OriginatorId.unpack(data[:4]))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.PMSI_TUNNEL:
            logger.parser('parsing pmsi-tunnel')
            self.attributes.add(PMSITunnel.unpack(data[:length]))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.ATOMIC_AGGREGATE:
            logger.parser('ignoring atomic-aggregate')
            return self._AttributesFactory(data[length:])

        if code == AttributeID.AGGREGATOR:
            logger.parser('ignoring aggregator')
            return self._AttributesFactory(data[length:])

        if code == AttributeID.AS4_AGGREGATOR:
            logger.parser('ignoring as4_aggregator')
            return self._AttributesFactory(data[length:])

        if code == AttributeID.COMMUNITY:
            logger.parser('parsing communities')
            self.attributes.add(self.__new_communities(data[:length]))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.EXTENDED_COMMUNITY:
            logger.parser('parsing communities')
            self.attributes.add(self.__new_extended_communities(data[:length]))
            return self._AttributesFactory(data[length:])

        if code == AttributeID.MP_UNREACH_NLRI:
            logger.parser('parsing multi-protocol nlri unreachable')
            next_attributes = data[length:]
            data = data[:length]
            afi, safi = unpack('!HB', data[:3])
            offset = 3
            # See RFC 5549 for better support
            if not afi in (AFI.ipv4, AFI.ipv6, AFI.l2vpn) or (not safi in (
                    SAFI.unicast, SAFI.mpls_vpn, SAFI.rtc, SAFI.evpn)):
                #self.log.out('we only understand IPv4/IPv6 and should never have received this MP_UNREACH_NLRI (%s %s)' % (afi,safi))
                raise Exception(
                    "Unsupported AFI/SAFI received !! not supposed to happen here..."
                )
                return self._AttributesFactory(next_attributes)
            data = data[offset:]
            while data:

                if safi == SAFI.unicast:
                    route = ReceivedRoute(BGPPrefix(afi, data), 'withdraw')
                elif (afi == AFI.ipv4 and safi == SAFI.mpls_vpn):
                    route = ReceivedRoute(
                        VPNLabelledPrefix.unpack(afi, safi, data), 'withdraw')
                elif (afi == AFI.ipv4 and safi == SAFI.rtc):
                    route = ReceivedRoute(
                        RouteTargetConstraint.unpack(afi, safi, data),
                        'withdraw')
                elif (afi == AFI.l2vpn and safi == SAFI.evpn):
                    route = ReceivedRoute(EVPNNLRI.unpack(data), 'withdraw')
                else:
                    raise Exception("Unsupported AFI/SAFI combination !!")
                    return self._AttributesFactory(next_attributes)

                data = data[len(route.nlri):]
                self.mp_routes.append(route)
            return self._AttributesFactory(next_attributes)

        if code == AttributeID.MP_REACH_NLRI:
            logger.parser('parsing multi-protocol nlri reachable')
            next_attributes = data[length:]
            data = data[:length]
            afi, safi = unpack('!HB', data[:3])
            offset = 3

            if not afi in (AFI.ipv4, AFI.ipv6, AFI.l2vpn) or (not safi in (
                    SAFI.unicast, SAFI.mpls_vpn, SAFI.rtc, SAFI.evpn)):
                #self.log.out('we only understand IPv4/IPv6 and should never have received this MP_REACH_NLRI (%s %s)' % (afi,safi))
                raise Exception(
                    "Unsupported AFI/SAFI received !! not supposed to happen here..."
                )
                return self._AttributesFactory(next_attributes)
            len_nh = ord(data[offset])
            offset += 1
            if afi == AFI.ipv4 and safi in (
                    SAFI.unicast, ) and not len_nh == 4:
                # We are not following RFC 4760 Section 7 (deleting route and possibly tearing down the session)
                #self.log.out('bad IPv4 next-hop length (%d)' % len_nh)
                return self._AttributesFactory(next_attributes)
            if afi == AFI.ipv6 and safi in (
                    SAFI.unicast, ) and not len_nh in (16, 32):
                # We are not following RFC 4760 Section 7 (deleting route and possibly tearing down the session)
                #self.log.out('bad IPv6 next-hop length (%d)' % len_nh)
                return self._AttributesFactory(next_attributes)
            nh = data[offset:offset + len_nh]
            offset += len_nh
            if len_nh == 32:
                # the next-hop contains a link-local address which we ideally need to ignore
                if nh[0] == chr(0xfe): nh = nh[16:]
                elif nh[16] == chr(0xfe):
                    nh = nh[:16]
                    # We are not following RFC 4760 Section 7 (deleting route and possibly tearing down the session)
                else:
                    return self._AttributesFactory(next_attributes)
            if len_nh >= 16: nh = socket.inet_ntop(socket.AF_INET6, nh)
            else:

                if (safi in (SAFI.mpls_vpn, )):
                    # the next-hop is preceded by an 8-byte RD (2-byte type + 6-byte value); we don't care about the RD yet
                    nh = socket.inet_ntop(socket.AF_INET, nh[8:])
                else:
                    nh = socket.inet_ntop(socket.AF_INET, nh)

            nb_snpa = ord(data[offset])
            offset += 1
            snpas = []
            for _ in range(nb_snpa):
                len_snpa = ord(data[offset])
                offset += 1
                snpas.append(data[offset:offset + len_snpa])
                offset += len_snpa
            data = data[offset:]
            while data:
                if safi == SAFI.unicast:
                    route = ReceivedRoute(BGPPrefix(afi, data), 'announce')
                elif (afi == AFI.ipv4 and safi == SAFI.mpls_vpn):
                    route = ReceivedRoute(
                        VPNLabelledPrefix.unpack(afi, safi, data), 'announce')
                elif (afi == AFI.ipv4 and safi == SAFI.rtc):
                    route = ReceivedRoute(
                        RouteTargetConstraint.unpack(afi, safi, data),
                        'announce')
                elif (afi == AFI.l2vpn and safi == SAFI.evpn):
                    route = ReceivedRoute(EVPNNLRI.unpack(data), 'announce')
                else:
                    raise Exception("Unsupported AFI/SAFI combination !!")
                    return self._AttributesFactory(next_attributes)

                data = data[len(route.nlri):]
                route.attributes = self.attributes
                route.attributes.add(NextHop(to_IP(nh)))
                self.mp_routes.append(route)
            return self._AttributesFactory(next_attributes)

        logger.warning(
            "ignoring attributes of type %s %s" %
            (str(code), [hex(ord(_)) for _ in data]), 'parsing')
        return self._AttributesFactory(data[length:])
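
read_message() above relies on the fixed 19-byte BGP header; the smallest legal message is a KEEPALIVE (all-ones marker, length 19, type 4, no body). The standalone sketch below builds one and unpacks the header exactly as read_message() does, using only struct; the constants are standard RFC 4271 values, not taken from ExaBGP:

from struct import pack, unpack

MARKER = '\xff' * 16   # RFC 4271 marker: sixteen bytes of ones
KEEPALIVE = 4          # OPEN=1, UPDATE=2, NOTIFICATION=3, KEEPALIVE=4

def build_keepalive():
    # header only: marker + 2-byte length (19) + 1-byte type, no body
    return MARKER + pack('!HB', 19, KEEPALIVE)

def parse_header(data):
    # mirrors the checks at the top of read_message()
    if data[:16] != MARKER:
        raise ValueError('The packet received does not contain a BGP marker')
    length, msg_type = unpack('!HB', data[16:19])
    if length < 19 or length > 4096:
        raise ValueError('Bad message length %d' % length)
    return length, msg_type

print(parse_header(build_keepalive()))   # (19, 4)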
Example #4
class Protocol (object):
	decode = True
	strict = False

	def __init__ (self,peer,connection=None):
		self.peer = peer
		self.neighbor = peer.neighbor
		self.connection = connection
		self._delta = Delta(Table(peer))
		self._asn4 = False
		self._messages = {}
		self._frozen = 0
		self.message_size = 4096

	# XXX: we use self.peer.neighbor.peer_address when we could use self.neighbor.peer_address

	def me (self,message):
		return "Peer %15s ASN %-7s %s" % (self.peer.neighbor.peer_address,self.peer.neighbor.peer_as,message)

	def connect (self):
		# allows testing the protocol code using a modified StringIO with an extra 'pending' function
		if not self.connection:
			peer = self.neighbor.peer_address
			local = self.neighbor.local_address
			md5 = self.neighbor.md5
			ttl = self.neighbor.ttl
			self.connection = Connection(peer,local,md5,ttl)

			message = 'neighbor %s connected\n' % self.peer.neighbor.peer_address
			try:
				proc = self.peer.supervisor.processes
				for name in proc.notify(self.neighbor.peer_address):
					proc.write(name,message)
			except ProcessError:
				raise Failure('Could not send message(s) to helper program(s) : %s' % message)

	def check_keepalive (self):
		left = int (self.connection.last_read  + self.neighbor.hold_time - time.time())
		if left <= 0:
			raise Notify(4,0)
		return left

	def close (self):
		#self._delta.last = 0
		if self.connection:
			# must be first, otherwise we could have a loop caused by the raise below
			self.connection.close()
			self.connection = None

			message = 'neighbor %s down\n' % self.peer.neighbor.peer_address
			try:
				proc = self.peer.supervisor.processes
				for name in proc.notify(self.neighbor.peer_address):
					proc.write(name,message)
			except ProcessError:
				raise Failure('Could not send message(s) to helper program(s) : %s' % message)

	# Read from network .......................................................

	def read_message (self):
		# This call resets the timer used for the timeout
		if not self.connection.pending(True):
			return NOP('')

		length = 19
		data = ''
		while length:
			if self.connection.pending():
				delta = self.connection.read(length)
				data += delta
				length -= len(delta)
				# The socket is closed
				if not data:
					raise Failure('The TCP connection is closed')

		if data[:16] != Message.MARKER:
			# We are speaking BGP - send us a valid Marker
			raise Notify(1,1,'The packet received does not contain a BGP marker')

		raw_length = data[16:18]
		length = unpack('!H',raw_length)[0]
		msg = data[18]

		if ( length < 19 or length > 4096):
			# BAD Message Length
			raise Notify(1,2)

		if (
			(msg == Open.TYPE and length < 29) or
			(msg == Update.TYPE and length < 23) or
			(msg == Notification.TYPE and length < 21) or
			(msg == KeepAlive.TYPE and length != 19)
		):
			# MUST send the faulty length back
			raise Notify(1,2,raw_length)
			#(msg == RouteRefresh.TYPE and length != 23)

		length -= 19
		data = ''
		while length:
			if self.connection.pending():
				delta = self.connection.read(length)
				data += delta
				length -= len(delta)
				# The socket is closed
				if not data:
					raise Failure('The TCP connection is closed')

		if msg == Notification.TYPE:
			raise Notification(ord(data[0]),ord(data[1]))

		if msg == KeepAlive.TYPE:
			return self.KeepAliveFactory(data)

		if msg == Open.TYPE:
			return self.OpenFactory(data)

		if msg == Update.TYPE:
			if self.neighbor.parse_routes:
				update = self.UpdateFactory(data)
				return update
			else:
				return NOP('')

		if self.strict:
			raise Notify(1,3,msg)

		return NOP(data)

	def read_open (self,_open,ip):
		message = self.read_message()

		if message.TYPE == NOP.TYPE:
			return message

		if message.TYPE != Open.TYPE:
			raise Notify(5,1,'The first packet received is not an open message (%s)' % message)

		if _open.asn.asn4() and not message.capabilities.announced(Capabilities.FOUR_BYTES_ASN):
			raise Notify(2,0,'We have an ASN4 and you do not speak it. bye.')

		self._asn4 = message.capabilities.announced(Capabilities.FOUR_BYTES_ASN)

		if message.asn == AS_TRANS:
			peer_as = message.capabilities[Capabilities.FOUR_BYTES_ASN]
		else:
			peer_as = message.asn

		if peer_as != self.neighbor.peer_as:
			raise Notify(2,2,'ASN in OPEN (%d) did not match ASN expected (%d)' % (message.asn,self.neighbor.peer_as))

		# RFC 6286 : http://tools.ietf.org/html/rfc6286
		#if message.router_id == RouterID('0.0.0.0'):
		#	message.router_id = RouterID(ip)
		if message.router_id == RouterID('0.0.0.0'):
			raise Notify(2,3,'0.0.0.0 is an invalid router_id according to RFC6286')
		if message.router_id == self.neighbor.router_id and message.asn == self.neighbor.local_as:
			raise Notify(2,3,'BGP Identifier collision (%s) on IBGP according to RFC 6286' % message.router_id)

		if message.hold_time < 3:
			raise Notify(2,6,'Hold Time is invalid (%d)' % message.hold_time)
		if message.hold_time >= 3:
			self.neighbor.hold_time = min(self.neighbor.hold_time,message.hold_time)

		# XXX: Does not work as the capa is not yet defined
		if message.capabilities.announced(Capabilities.EXTENDED_MESSAGE):
			# untested !
			if self.peer.bgp.message_size:
				self.message_size = self.peer.bgp.message_size

# README: Limiting what we announce may cause issues if you add a new family and SIGHUP
# README: So it is commented out until I make up my mind whether to add it (as Juniper complains about mismatched capabilities)
#		# These are the capabilities we need to announce these routes
#		for family in _open.capabilities[Capabilities.MULTIPROTOCOL_EXTENSIONS]:
#			# if the peer does not support them, tear down the session
#			if family not in message.capabilities[Capabilities.MULTIPROTOCOL_EXTENSIONS]:
#				afi,safi = family
#				raise Notify(2,0,'Peers does not speak %s %s' % (afi,safi))

		return message

	def read_keepalive (self):
		message = self.read_message()
		if message.TYPE == NOP.TYPE:
			return message
		if message.TYPE != KeepAlive.TYPE:
			raise Notify(5,2)
		return message

	# Sending message to peer .................................................

	# we do not buffer these messages on purpose

	def new_open (self,restarted,asn4):
		if asn4:
			asn = self.neighbor.local_as
		else:
			asn = AS_TRANS

		o = Open(4,asn,self.neighbor.router_id.ip,Capabilities().default(self.neighbor,restarted),self.neighbor.hold_time)

		if not self.connection.write(o.message()):
			raise Failure('Could not send open')
		return o

	def new_keepalive (self,force=False):
		left = int(self.connection.last_write + self.neighbor.hold_time.keepalive() - time.time())
		k = KeepAlive()
		m = k.message()
		if force:
			written = self.connection.write(k.message())
			if not written:
				logger.message(self.me(">> KEEPALIVE buffered"))
				self._messages[self.neighbor.peer_as].append(('KEEPALIVE',m))
			else:
				self._frozen = 0
			return left,k
		if left <= 0:
			written = self.connection.write(k.message())
			if not written:
				logger.message(self.me(">> KEEPALIVE buffered"))
				self._messages[self.neighbor.peer_as].append(('KEEPALIVE',m))
			else:
				self._frozen = 0
			return left,k
		return left,None

	def new_notification (self,notification):
		return self.connection.write(notification.message())

	# messages buffered in case of failure

	def buffered (self):
		return self._messages.get(self.neighbor.peer_as,[]) != []

	def _backlog (self,maximum=0):
		backlog = self._messages.get(self.neighbor.peer_as,[])
		if backlog:
			if not self._frozen:
				self._frozen = time.time()
			if self._frozen and self._frozen + (self.neighbor.hold_time) < time.time():
				raise Failure('peer %s not reading on socket - killing session' % self.neighbor.peer_as)
			logger.message(self.me("unable to send route for %d second (maximum allowed %d)" % (time.time()-self._frozen,self.neighbor.hold_time)))
			nb_backlog = len(backlog)
			if nb_backlog > MAX_BACKLOG:
				raise Failure('over %d routes buffered for peer %s - killing session' % (MAX_BACKLOG,self.neighbor.peer_as))
			logger.message(self.me("backlog of %d/%d routes" % (nb_backlog,MAX_BACKLOG)))
		count = 0
		while backlog:
			count += 1
			name,update = backlog[0]
			written = self.connection.write(update)
			if not written:
				break
			logger.message(self.me(">> DEBUFFERED %s" % name))
			backlog.pop(0)
			self._frozen = 0
			yield count
			if maximum and count >= maximum:
				break
		self._messages[self.neighbor.peer_as] = backlog

	def _announce (self,name,generator):
		def chunked (generator,size):
			chunk = ''
			for data in generator:
				if len(data) > size:
					raise Failure('Can not send BGP update larger than %d bytes on this connection.' % size)
				if len(chunk) + len(data) <= size:
					chunk += data
					continue
				yield chunk
				chunk = data
			if chunk:
				yield chunk

		count = 0
		# The message size is the whole BGP message INCLUDING headers !
		for update in chunked(generator,self.message_size-19):
			count += 1
			if self._messages[self.neighbor.peer_as]:
				logger.message(self.me(">> %s could not be sent, some messages are still in the buffer" % name))
				self._messages[self.neighbor.peer_as].append((name,update))
				continue
			written = self.connection.write(update)
			if not written:
				logger.message(self.me(">> %s buffered" % name))
				self._messages[self.neighbor.peer_as].append((name,update))
			yield count

	def new_announce (self):
		for answer in self._backlog():
			yield answer
		asn4 = not not self.peer.open.capabilities.announced(Capabilities.FOUR_BYTES_ASN)
		for answer in self._announce('UPDATE',self._delta.announce(asn4,self.neighbor.local_as,self.neighbor.peer_as)):
			yield answer

	def new_update (self):
		for answer in self._backlog():
			yield answer
		asn4 = not not self.peer.open.capabilities.announced(Capabilities.FOUR_BYTES_ASN)
		for answer in self._announce('UPDATE',self._delta.update(asn4,self.neighbor.local_as,self.neighbor.peer_as)):
			yield answer

	def new_eors (self,families):
		for answer in self._backlog():
			pass
		eor = EOR()
		eors = eor.eors(families)
		for answer in self._announce('EOR',eors):
			pass

	# Message Factory .................................................

	def KeepAliveFactory (self,data):
		return KeepAlive()

	def _key_values (self,name,data):
		if len(data) < 2:
			raise Notify(2,0,"Bad length for OPEN %s (<2) %s" % (name,hexa(data)))
		l = ord(data[1])
		boundary = l+2
		if len(data) < boundary:
			raise Notify(2,0,"Bad length for OPEN %s (buffer underrun) %s" % (name,hexa(data)))
		key = ord(data[0])
		value = data[2:boundary]
		rest = data[boundary:]
		return key,value,rest

	def CapabilitiesFactory (self,data):
		capabilities = Capabilities()
		option_len = ord(data[0])
		if option_len:
			data = data[1:]
			while data:
				key,value,data = self._key_values('parameter',data)
				# Parameters must only be sent once.
				if key == Parameter.AUTHENTIFICATION_INFORMATION:
					raise Notify(2,5)

				if key == Parameter.CAPABILITIES:
					while value:
						k,capv,value = self._key_values('capability',value)
						# Multiple Capabilities can be present in a single attribute
						#if r:
						#	raise Notify(2,0,"Bad length for OPEN %s (size mismatch) %s" % ('capability',hexa(value)))

						if k == Capabilities.MULTIPROTOCOL_EXTENSIONS:
							if k not in capabilities:
								capabilities[k] = MultiProtocol()
							afi = AFI(unpack('!H',capv[:2])[0])
							safi = SAFI(ord(capv[3]))
							capabilities[k].append((afi,safi))
							continue

						if k == Capabilities.GRACEFUL_RESTART:
							restart = unpack('!H',capv[:2])[0]
							restart_flag = restart >> 12
							restart_time = restart & Graceful.TIME_MASK
							value_gr = capv[2:]
							families = []
							while value_gr:
								afi = AFI(unpack('!H',value_gr[:2])[0])
								safi = SAFI(ord(value_gr[2]))
								flag_family = ord(value_gr[0])
								families.append((afi,safi,flag_family))
								value_gr = value_gr[4:]
							capabilities[k] = Graceful(restart_flag,restart_time,families)
							continue

						if k == Capabilities.FOUR_BYTES_ASN:
							capabilities[k] = ASN(unpack('!L',capv[:4])[0])
							continue

						if k == Capabilities.ROUTE_REFRESH:
							capabilities[k] = RouteRefresh()
							continue

						if k == Capabilities.CISCO_ROUTE_REFRESH:
							capabilities[k] = CiscoRouteRefresh()
							continue

						if k == Capabilities.MULTISESSION_BGP:
							capabilities[k] = MultiSession()
							continue
						if k == Capabilities.MULTISESSION_BGP_RFC:
							capabilities[k] = MultiSession()
							continue

						if k not in capabilities:
							capabilities[k] = Unknown(k,[ord(_) for _ in capv])
				else:
					raise Notify(2,0,'Unknown OPEN parameter %s' % hex(key))
		return capabilities

	def OpenFactory (self,data):
		version = ord(data[0])
		if version != 4:
			# Only version 4 is supported nowadays.
			raise Notify(2,1,data[0])
		asn = unpack('!H',data[1:3])[0]
		hold_time = unpack('!H',data[3:5])[0]
		numeric = unpack('!L',data[5:9])[0]
		router_id = "%d.%d.%d.%d" % (numeric>>24,(numeric>>16)&0xFF,(numeric>>8)&0xFF,numeric&0xFF)
		capabilities = self.CapabilitiesFactory(data[9:])
		return Open(version,asn,router_id,capabilities,hold_time)


	def UpdateFactory (self,data):
		length = len(data)
		# withdrawn
		lw,withdrawn,data = defix(data)
		if len(withdrawn) != lw:
			raise Notify(3,1)
		la,attribute,announced = defix(data)
		if len(attribute) != la:
			raise Notify(3,1)
		# The RFC check ...
		#if lw + la + 23 > length:
		if 2 + lw + 2 + la + len(announced) != length:
			raise Notify(3,1)

		routes = []
		while withdrawn:
			nlri = BGPPrefix(AFI.ipv4,withdrawn)
			route = ReceivedRoute(nlri,'withdraw')
			withdrawn = withdrawn[len(nlri):]
			routes.append(route)

		self.mp_routes = []
		attributes = self.AttributesFactory(attribute)
		routes.extend(self.mp_routes)

		while announced:
			nlri = BGPPrefix(AFI.ipv4,announced)
			route = ReceivedRoute(nlri,'announce')
			# XXX: Should this be a deep copy
			route.attributes = attributes
			announced = announced[len(nlri):]
			routes.append(route)
			#logger.info(self.me('Received route %s' % nlri))

		#print "routes", routes
		#print "attributes", attributes

		if routes:
			return Update(routes)
		return NOP('')

	def AttributesFactory (self,data):
		try:
			self.attributes = Attributes()
			return self._AttributesFactory(data).attributes
		except IndexError:
			raise Notify(3,2,data)

	def __new_ASPath (self,data,asn4=False):
		
		if len(data) == 0:
			return ASPath(asn4)
		
		if asn4:
			size = 4
			decoder = 'L' # could it be 'I' as well ?
		else:
			size = 2
			decoder = 'H'
		stype = ord(data[0])
		slen = ord(data[1])
		sdata = data[2:2+(slen*size)]
		
		ASPS = ASPath(asn4,stype)
		format = '!'+(decoder*slen)
		for c in unpack(format,sdata):
			ASPS.add(c)
		return ASPS

	def __new_AS4Path (self,data):
		stype = ord(data[0])
		slen = ord(data[1])
		sdata = data[2:2+(slen*4)]

		ASPS = AS4Path(stype)
		format = '!'+('L'*slen)
		for c in unpack(format,sdata):
			ASPS.add(c)
		return ASPS

	def __merge_attributes (self):
		as2path = self.attributes[AttributeID.AS_PATH]
		as4path = self.attributes[AttributeID.AS4_PATH]
		newASPS = ASPath(True,as2path.asptype)
		len2 = len(as2path.aspsegment)
		len4 = len(as4path.aspsegment)

		if len2 < len4:
			for asn in as4path.aspsegment:
				newASPS.add(asn)
		else:
			for asn in as2path.aspsegment[:-len4]:
				newASPS.add(asn)
			for asn in as4path.aspsegment:
				newASPS.add(asn)

		self.attributes.remove(AttributeID.AS_PATH)
		self.attributes.remove(AttributeID.AS4_PATH)
		self.attributes.add(newASPS)

		#raise Notify(3,1,'could not merge AS4_PATH in AS_PATH')

	def __new_communities (self,data):
		communities = Communities()
		while data:
			community = unpack('!L',data[:4])[0]
			data = data[4:]
			if data and len(data) < 4:
				raise Notify(3,1,'could not decode community %s' % str([hex(ord(_)) for _ in data]))
			communities.add(Community(community))
		return communities

	def __new_extended_communities (self,data):
		communities = ECommunities()
		while data:
			community = data[:8]
			data = data[8:]
			if data and len(data) < 8:
				raise Notify(3,1,'could not decode extended community %s' % str([hex(ord(_)) for _ in data]))
			communities.add(ECommunity.unpackFrom(community))
		return communities

	def _AttributesFactory (self,data):
		if not data:
			return self

		# We do not care whether the attributes are transitive or not as we do not redistribute
		flag = Flag(ord(data[0]))
		code = AttributeID(ord(data[1]))

		if flag & Flag.EXTENDED_LENGTH:
			length = unpack('!H',data[2:4])[0]
			offset = 4
		else:
			length = ord(data[2])
			offset = 3

		data = data[offset:]

#		if not length:
#			return self._AttributesFactory(data[length:])

		# XXX: This code does not make sure that attributes are unique - or does it ?

		if code == AttributeID.ORIGIN:
			logger.parser('parsing origin')
			self.attributes.add(Origin(ord(data[0])))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.AS_PATH:
			logger.parser('parsing as_path')
			self.attributes.add(self.__new_ASPath(data[:length],self._asn4))
			if not self._asn4 and self.attributes.has(AttributeID.AS4_PATH):
				self.__merge_attributes()
			return self._AttributesFactory(data[length:])

		if code == AttributeID.AS4_PATH:
			logger.parser('parsing as_path')
			self.attributes.add(self.__new_AS4Path(data[:length]))
			if not self._asn4 and self.attributes.has(AttributeID.AS_PATH):
				self.__merge_attributes()
			return self._AttributesFactory(data[length:])

		if code == AttributeID.NEXT_HOP:
			logger.parser('parsing next-hop')
			self.attributes.add(NextHop(Inet(AFI.ipv4,data[:4])))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.MED:
			logger.parser('parsing med')
			self.attributes.add(MED(unpack('!L',data[:4])[0]))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.LOCAL_PREF:
			logger.parser('parsing local-preference')
			self.attributes.add(LocalPreference(unpack('!L',data[:4])[0]))
			return self._AttributesFactory(data[length:])
		
		if code == AttributeID.ORIGINATOR_ID:
			logger.parser('parsing originator-id')
			self.attributes.add(OriginatorId.unpack(data[:4]))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.PMSI_TUNNEL:
			logger.parser('parsing pmsi-tunnel')
			self.attributes.add(PMSITunnel.unpack(data[:length]))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.ATOMIC_AGGREGATE:
			logger.parser('ignoring atomic-aggregate')
			return self._AttributesFactory(data[length:])

		if code == AttributeID.AGGREGATOR:
			logger.parser('ignoring aggregator')
			return self._AttributesFactory(data[length:])

		if code == AttributeID.AS4_AGGREGATOR:
			logger.parser('ignoring as4_aggregator')
			return self._AttributesFactory(data[length:])

		if code == AttributeID.COMMUNITY:
			logger.parser('parsing communities')
			self.attributes.add(self.__new_communities(data[:length]))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.EXTENDED_COMMUNITY:
			logger.parser('parsing communities')
			self.attributes.add(self.__new_extended_communities(data[:length]))
			return self._AttributesFactory(data[length:])

		if code == AttributeID.MP_UNREACH_NLRI:
			logger.parser('parsing multi-protocol nlri unreachable')
			next_attributes = data[length:]
			data = data[:length]
			afi,safi = unpack('!HB',data[:3])
			offset = 3
			# See RFC 5549 for better support
			if not afi in (AFI.ipv4,AFI.ipv6,AFI.l2vpn) or (not safi in (SAFI.unicast, SAFI.mpls_vpn, SAFI.rtc,SAFI.evpn)):
				#self.log.out('we only understand IPv4/IPv6 and should never have received this MP_UNREACH_NLRI (%s %s)' % (afi,safi))
				raise Exception("Unsupported AFI/SAFI received !! not supposed to happen here...")
				return self._AttributesFactory(next_attributes)
			data = data[offset:]
			while data:
				
				if safi == SAFI.unicast:
					route = ReceivedRoute(BGPPrefix(afi,data),'withdraw')
				elif (afi == AFI.ipv4 and safi == SAFI.mpls_vpn):
					route = ReceivedRoute(VPNLabelledPrefix.unpack(afi,safi,data),'withdraw')
				elif (afi == AFI.ipv4 and safi == SAFI.rtc):
					route = ReceivedRoute(RouteTargetConstraint.unpack(afi,safi,data),'withdraw')
				elif (afi == AFI.l2vpn and safi == SAFI.evpn):
					route = ReceivedRoute(EVPNNLRI.unpack(data) ,'withdraw')
				else:
					raise Exception("Unsupported AFI/SAFI combination !!")
					return self._AttributesFactory(next_attributes)
				
				data = data[len(route.nlri):]
				self.mp_routes.append(route)
			return self._AttributesFactory(next_attributes)

		if code == AttributeID.MP_REACH_NLRI:
			logger.parser('parsing multi-protocol nlri reachable')
			next_attributes = data[length:]
			data = data[:length]
			afi,safi = unpack('!HB',data[:3])
			offset = 3
			
			if not afi in (AFI.ipv4,AFI.ipv6,AFI.l2vpn) or (not safi in (SAFI.unicast,SAFI.mpls_vpn,SAFI.rtc,SAFI.evpn)):
				#self.log.out('we only understand IPv4/IPv6 and should never have received this MP_REACH_NLRI (%s %s)' % (afi,safi))
				raise Exception("Unsupported AFI/SAFI received !! not supposed to happen here...")
				return self._AttributesFactory(next_attributes)
			len_nh = ord(data[offset])
			offset += 1
			if afi == AFI.ipv4 and safi in (SAFI.unicast,) and not len_nh == 4: 
				# We are not following RFC 4760 Section 7 (deleting route and possibly tearing down the session)
				#self.log.out('bad IPv4 next-hop length (%d)' % len_nh)
				return self._AttributesFactory(next_attributes)
			if afi == AFI.ipv6 and safi in (SAFI.unicast,) and not len_nh in (16,32):
				# We are not following RFC 4760 Section 7 (deleting route and possibly tearing down the session)
				#self.log.out('bad IPv6 next-hop length (%d)' % len_nh)
				return self._AttributesFactory(next_attributes)
			nh = data[offset:offset+len_nh]
			offset += len_nh
			if len_nh == 32:
				# the next-hop contains a link-local address which we ideally need to ignore
				if nh[0] == chr(0xfe): nh = nh[16:]
				elif nh[16] == chr(0xfe): nh = nh[:16]
				# We are not following RFC 4760 Section 7 (deleting route and possibly tearing down the session)
				else: return self._AttributesFactory(next_attributes)
			if len_nh >= 16: nh = socket.inet_ntop(socket.AF_INET6,nh)
			else:
				
				if (safi in (SAFI.mpls_vpn,)):
					# the next-hop is preceded by an 8-byte RD (2-byte type + 6-byte value); we don't care about the RD yet
					nh = socket.inet_ntop(socket.AF_INET,nh[8:])
				else:
					nh = socket.inet_ntop(socket.AF_INET,nh)
			
			nb_snpa = ord(data[offset])
			offset += 1
			snpas = []
			for _ in range(nb_snpa):
				len_snpa = ord(data[offset])
				offset += 1
				snpas.append(data[offset:offset+len_snpa])
				offset += len_snpa
			data = data[offset:]
			while data:
				if safi == SAFI.unicast:
					route = ReceivedRoute(BGPPrefix(afi,data),'announce')
				elif (afi == AFI.ipv4 and safi == SAFI.mpls_vpn):
					route = ReceivedRoute(VPNLabelledPrefix.unpack(afi,safi,data) ,'announce')
				elif (afi == AFI.ipv4 and safi == SAFI.rtc):
					route = ReceivedRoute(RouteTargetConstraint.unpack(afi,safi,data) ,'announce')
				elif (afi == AFI.l2vpn and safi == SAFI.evpn):
					route = ReceivedRoute(EVPNNLRI.unpack(data) ,'announce')
				else:
					raise Exception("Unsupported AFI/SAFI combination !!")
					return self._AttributesFactory(next_attributes)
										
				data = data[len(route.nlri):]
				route.attributes = self.attributes
				route.attributes.add(NextHop(to_IP(nh)))
				self.mp_routes.append(route)
			return self._AttributesFactory(next_attributes)

		logger.warning("ignoring attributes of type %s %s" % (str(code),[hex(ord(_)) for _ in data]),'parsing')
		return self._AttributesFactory(data[length:])
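
The MP_REACH_NLRI branch above packs several next-hop rules into a few lines: plain IPv4, a bare IPv6 address, a 32-byte global + link-local pair, and the 8-byte Route Distinguisher that prefixes the next-hop for the VPN SAFI. The helper below restates those rules in isolation so they are easier to read and test; it is a sketch that follows the example's behaviour, not an ExaBGP API:

import socket

def decode_next_hop(nh, vpn=False):
    # nh is the raw next-hop field of MP_REACH_NLRI (Python 2 str/bytes)
    if len(nh) == 32:
        # global + link-local IPv6 pair: keep whichever half is not link-local
        if nh[0] == '\xfe':
            nh = nh[16:]
        elif nh[16] == '\xfe':
            nh = nh[:16]
        else:
            # neither half is link-local: the example above skips the announcement
            return None
    if len(nh) == 16:
        return socket.inet_ntop(socket.AF_INET6, nh)
    if vpn:
        # VPN next-hops carry an 8-byte RD (type + value) before the IPv4 address
        nh = nh[8:]
    return socket.inet_ntop(socket.AF_INET, nh)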
Example #5

    def _initiateConnection(self):
        self.log.debug("Initiate ExaBGP connection to %s from %s",
                       self.peerAddress, self.localAddress)

        self.rtc_active = False

        neighbor = Neighbor()
        neighbor.router_id = RouterID(self.config['local_address'])
        neighbor.local_as = self.config['my_as']
        neighbor.peer_as = self.config['peer_as']
        neighbor.local_address = self.config['local_address']
        neighbor.peer_address = self.peerAddress
        neighbor.parse_routes = True

        # create dummy objects to fake exabgp into talking with us
        peer = FakePeer(neighbor)
        local = FakeLocal(self.localAddress)

        try:
            self.connection = Connection(peer, local, None, None)
        except Failure as e:
            raise InitiateConnectionException(repr(e))

        self.log.debug("Instantiate ExaBGP Protocol")
        self.protocol = MyBGPProtocol(peer, self.connection)
        self.protocol.connect()

        # this is highly similar to exabgp.network.peer._run

        o = self.protocol.new_open(
            False, False, self.config, ExaBGPPeerWorker.enabledFamilies)

        self.log.debug("Send open: [%s]", o)
        self.fsm.state = FSM.OpenSent

        count = 0
        self.log.debug("Wait for open...")
        while not self.shouldStop:
            # FIXME: we should time-out here, at some point
            message = self.protocol.read_open(o, None)

            count += 1
            if isinstance(message, NOP):
                # TODO(tmmorin): check compliance with BGP specs...
                if count > 20:
                    self.connection.close()
                    # FIXME: this should be moved to
                    # BGPPeerWorker in a more generic way
                    # (+ send Notify when needed)
                    raise OpenWaitTimeout("%ds" % int(20 * 0.5))
                sleep(0.5)
                continue

            self.log.debug("Read message: %s", message)

            if isinstance(message, Open):
                break
            else:
                self.log.error("Received unexpected message: %s", message)
                # FIXME

        if self.shouldStop:
            raise StoppedException()

        # An Open was received
        received_open = message

        self._setHoldTime(received_open.hold_time)

        # Hack to ease troubleshooting, have the real peer address appear in
        # the logs when fakerr is used
        if received_open.router_id.ip != self.peerAddress:
            self.log.info("changing thread name from %s to BGP-x%s, based on"
                          " the router-id advertized in Open (different from"
                          " peerAddress == %s)", self.name,
                          received_open.router_id.ip, self.peerAddress)
            self.name = "BGP-%s:%s" % (self.peerAddress,
                                       received_open.router_id.ip)

        try:
            mp_capabilities = received_open.capabilities[
                Capabilities.MULTIPROTOCOL_EXTENSIONS]
        except Exception:
            mp_capabilities = []

        # check that our peer advertized at least mpls_vpn and evpn
        # capabilities
        self._activeFamilies = []
        for (afi, safi) in (ExaBGPPeerWorker.enabledFamilies +
                            [(AFI(AFI.ipv4), SAFI(SAFI.rtc))]):
            if (afi, safi) not in mp_capabilities:
                self.log.warning(
                    "Peer does not advertise (%s,%s) capability", afi, safi)
            else:
                self.log.info(
                    "Family (%s,%s) successfully negotiated with peer %s",
                    afi, safi, self.peerAddress)
                self._activeFamilies.append((afi, safi))

        if len(self._activeFamilies) == 0:
            self.log.error("No family was negotiated for VPN routes")

        # proceed with the BGP session

        self.connection.io.setblocking(1)

        self.enqueue(SendKeepAlive)

        self.fsm.state = FSM.OpenConfirm

        self.rtc_active = False

        if self.config['enable_rtc']:
            if (AFI(AFI.ipv4), SAFI(SAFI.rtc)) in mp_capabilities:
                self.log.info(
                    "RTC successfully enabled with peer %s", self.peerAddress)
                self.rtc_active = True
            else:
                self.log.warning(
                    "enable_rtc True but peer not configured for RTC")
class ExaBGPPeerWorker(BGPPeerWorker, LookingGlass):

    enabledFamilies = [(AFI(AFI.ipv4), SAFI(SAFI.mpls_vpn)),
                       # (AFI(AFI.ipv6), SAFI(SAFI.mpls_vpn)),
                       (AFI(AFI.l2vpn), SAFI(SAFI.evpn))]

    def __init__(self, bgpManager, name, peerAddress, config):
        BGPPeerWorker.__init__(self, bgpManager, name, peerAddress)
        self.config = config
        self.localAddress = self.config['local_address']
        self.peerAddress = peerAddress

        self.connection = None

        self.rtc_active = False
        self._activeFamilies = []

    def _toIdle(self):
        self._activeFamilies = []

    def _initiateConnection(self):
        self.log.debug("Initiate ExaBGP connection to %s from %s",
                       self.peerAddress, self.localAddress)

        self.rtc_active = False

        neighbor = Neighbor()
        neighbor.router_id = RouterID(self.config['local_address'])
        neighbor.local_as = self.config['my_as']
        neighbor.peer_as = self.config['peer_as']
        neighbor.local_address = self.config['local_address']
        neighbor.peer_address = self.peerAddress
        neighbor.parse_routes = True

        # create dummy objects to fake exabgp into talking with us
        peer = FakePeer(neighbor)
        local = FakeLocal(self.localAddress)

        try:
            self.connection = Connection(peer, local, None, None)
        except Failure as e:
            raise InitiateConnectionException(repr(e))

        self.log.debug("Instantiate ExaBGP Protocol")
        self.protocol = MyBGPProtocol(peer, self.connection)
        self.protocol.connect()

        # this is highly similar to exabgp.network.peer._run

        o = self.protocol.new_open(
            False, False, self.config, ExaBGPPeerWorker.enabledFamilies)

        self.log.debug("Send open: [%s]", o)
        self.fsm.state = FSM.OpenSent

        count = 0
        self.log.debug("Wait for open...")
        while not self.shouldStop:
            # FIXME: we should time-out here, at some point
            message = self.protocol.read_open(o, None)

            count += 1
            if isinstance(message, NOP):
                # TODO(tmmorin): check compliance with BGP specs...
                if count > 20:
                    self.connection.close()
                    # FIXME: this should be moved to
                    # BGPPeerWorker in a more generic way
                    # (+ send Notify when needed)
                    raise OpenWaitTimeout("%ds" % int(20 * 0.5))
                sleep(0.5)
                continue

            self.log.debug("Read message: %s", message)

            if isinstance(message, Open):
                break
            else:
                self.log.error("Received unexpected message: %s", message)
                # FIXME

        if self.shouldStop:
            raise StoppedException()

        # An Open was received
        received_open = message

        self._setHoldTime(received_open.hold_time)

        # Hack to ease troubleshooting, have the real peer address appear in
        # the logs when fakerr is used
        if received_open.router_id.ip != self.peerAddress:
            self.log.info("changing thread name from %s to BGP-x%s, based on"
                          " the router-id advertized in Open (different from"
                          " peerAddress == %s)", self.name,
                          received_open.router_id.ip, self.peerAddress)
            self.name = "BGP-%s:%s" % (self.peerAddress,
                                       received_open.router_id.ip)

        try:
            mp_capabilities = received_open.capabilities[
                Capabilities.MULTIPROTOCOL_EXTENSIONS]
        except Exception:
            mp_capabilities = []

        # check that our peer advertized at least mpls_vpn and evpn
        # capabilities
        self._activeFamilies = []
        for (afi, safi) in (ExaBGPPeerWorker.enabledFamilies +
                            [(AFI(AFI.ipv4), SAFI(SAFI.rtc))]):
            if (afi, safi) not in mp_capabilities:
                self.log.warning(
                    "Peer does not advertise (%s,%s) capability", afi, safi)
            else:
                self.log.info(
                    "Family (%s,%s) successfully negotiated with peer %s",
                    afi, safi, self.peerAddress)
                self._activeFamilies.append((afi, safi))

        if len(self._activeFamilies) == 0:
            self.log.error("No family was negotiated for VPN routes")

        # proceed with the BGP session

        self.connection.io.setblocking(1)

        self.enqueue(SendKeepAlive)

        self.fsm.state = FSM.OpenConfirm

        self.rtc_active = False

        if self.config['enable_rtc']:
            if (AFI(AFI.ipv4), SAFI(SAFI.rtc)) in mp_capabilities:
                self.log.info(
                    "RTC successfully enabled with peer %s", self.peerAddress)
                self.rtc_active = True
            else:
                self.log.warning(
                    "enable_rtc True but peer not configured for RTC")

    def _toEstablished(self):
        BGPPeerWorker._toEstablished(self)

        if self.rtc_active:
            # subscribe to RTC routes, to be able to propagate them from
            # internal workers to this peer
            self._subscribe(AFI(AFI.ipv4), SAFI(SAFI.rtc))
        else:
            # if we don't use RTC with our peer, we need to see events for all
            # routes of all active families so that we can send them to the peer
            for (afi, safi) in self._activeFamilies:
                self._subscribe(afi, safi)

    def _receiveLoopFun(self):

        select.select([self.connection.io], [], [], 5)

        if not self._queue.empty():
            if self._stopLoops.isSet():
                self.log.info("stopLoops is set -> Close connection and"
                              " Finish receive Loop")
                self.connection.close()
                return 0

        try:
            message = self.protocol.read_message()
        except Notification as e:
            self.log.error("Peer notified us about an error: %s", e)
            return 2
        except Failure as e:
            self.log.warning("Protocol failure: %s", e)
            return 2
        except socket.error as e:
            self.log.warning("Socket error: %s", e)
            return 2
        except Exception as e:
            self.log.error("Error while reading BGP message: %s", e)
            raise

        if message.TYPE == NOP.TYPE:
            # we arrived here because select call timed-out
            return 1
        elif message.TYPE == Update.TYPE:
            if (self.fsm.state != FSM.Established):
                raise Exception("Update received but not in Established state")
            pass  # see below
        elif message.TYPE == KeepAlive.TYPE:
            if (self.fsm.state == FSM.OpenConfirm):
                self._toEstablished()
            self.enqueue(KeepAliveReceived)
            self.log.debug("Received message: %s", message)
        else:
            self.log.warning("Received unexpected message: %s", message)

        if isinstance(message, Update):
            self.log.info("Received message: UPDATE...")
            if message.routes:
                for route in message.routes:
                    self._processReceivedRoute(route)

        return 1

    def _processReceivedRoute(self, route):
        self.log.info("Received route: %s", route)

        rts = []
        if AttributeID.EXTENDED_COMMUNITY in route.attributes:
            rts = [ecom for ecom in route.attributes[
                   AttributeID.EXTENDED_COMMUNITY].communities
                   if isinstance(ecom, RouteTarget)]

            if not rts:
                raise Exception("Unable to find any Route Targets"
                                "in the received route")

        routeEntry = self._newRouteEntry(route.nlri.afi, route.nlri.safi, rts,
                                         route.nlri, route.attributes)

        if route.action == "announce":
            self._pushEvent(RouteEvent(RouteEvent.ADVERTISE, routeEntry))
        else:
            self._pushEvent(RouteEvent(RouteEvent.WITHDRAW, routeEntry))

        # TODO(tmmorin): move the RTC code out of the peer-specific code
        if (route.nlri.afi, route.nlri.safi) == (AFI(AFI.ipv4),
                                                 SAFI(SAFI.rtc)):
            self.log.info("Received an RTC route")

            if route.nlri.route_target is None:
                self.log.info("Received RTC is a wildcard")

            # the semantics of RTC routes do not distinguish between AFI/SAFIs:
            # if our peer subscribed to a Route Target, we need to send it all
            # routes of any AFI/SAFI carrying this Route Target.
            for (afi, safi) in self._activeFamilies:
                if (afi, safi) != (AFI(AFI.ipv4), SAFI(SAFI.rtc)):
                    if route.action == "announce":
                        self._subscribe(afi, safi, route.nlri.route_target)
                    else:  # withdraw
                        self._unsubscribe(afi, safi, route.nlri.route_target)

    def _send(self, data):
        # (error if state not the right one for sending updates)
        self.log.debug("Sending %d bytes on socket to peer %s",
                       len(data), self.peerAddress)
        try:
            self.connection.write(data)
        except Exception as e:
            self.log.error("Was not able to send data: %s", e)

    def _keepAliveMessageData(self):
        return KeepAlive().message()

    def _updateForRouteEvent(self, event):
        r = Route(event.routeEntry.nlri)
        if event.type == event.ADVERTISE:
            self.log.info("Generate UPDATE message: %s", r)
            r.attributes = event.routeEntry.attributes
            try:
                return Update([r]).update(False, self.config['my_as'],
                                          self.config['my_as'])
            except Exception as e:
                self.log.error("Exception while generating message for "
                               "route %s: %s", r, e)
                self.log.warning("%s", traceback.format_exc())
                return ''

        elif event.type == event.WITHDRAW:
            self.log.info("Generate WITHDRAW message: %s", r)
            return Update([r]).withdraw(False, self.config['my_as'],
                                        self.config['my_as'])

    def stop(self):
        if self.connection is not None:
            self.connection.close()
        BGPPeerWorker.stop(self)

    # Looking Glass ###############

    def getLookingGlassLocalInfo(self, pathPrefix):
        return {
            "peeringAddresses": {"peerAddress":  self.peerAddress,
                                 "localAddress": self.localAddress},
            "as_info": {"local": self.config['my_as'],
                        "peer":  self.config['peer_as']},
            "rtc": {"active": self.rtc_active,
                    "enabled": self.config['enable_rtc']},
            "active_families": [repr(f) for f in self._activeFamilies],
        }
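
For illustration, a hypothetical instantiation of the worker above, showing the configuration keys it actually reads (local_address, my_as, peer_as, enable_rtc); bgpManager and the start() call are assumed to be provided by the surrounding BGPPeerWorker machinery and are not defined here:

config = {
    'local_address': '10.0.0.1',   # used both as source address and router-id
    'my_as': 64512,                # local AS number
    'peer_as': 64512,              # iBGP peering in this example
    'enable_rtc': True,            # request RT Constraint (ipv4/rtc) if the peer supports it
}

worker = ExaBGPPeerWorker(bgpManager, "BGP-10.0.0.2", "10.0.0.2", config)
worker.start()   # assumed to be inherited from the BGPPeerWorker/thread base class
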
class ExaBGPPeerWorker(BGPPeerWorker, LookingGlass):

    enabledFamilies = [
        (AFI(AFI.ipv4), SAFI(SAFI.mpls_vpn)),
        # (AFI(AFI.ipv6), SAFI(SAFI.mpls_vpn)),
        (AFI(AFI.l2vpn), SAFI(SAFI.evpn))
    ]

    def __init__(self, bgpManager, name, peerAddress, config):
        BGPPeerWorker.__init__(self, bgpManager, name, peerAddress)
        self.config = config
        self.localAddress = self.config['local_address']
        self.peerAddress = peerAddress

        self.connection = None

        self.rtc_active = False
        self._activeFamilies = []

    def _toIdle(self):
        self._activeFamilies = []

    def _initiateConnection(self):
        self.log.debug("Initiate ExaBGP connection to %s from %s",
                       self.peerAddress, self.localAddress)

        self.rtc_active = False

        neighbor = Neighbor()
        neighbor.router_id = RouterID(self.config['local_address'])
        neighbor.local_as = self.config['my_as']
        neighbor.peer_as = self.config['peer_as']
        neighbor.local_address = self.config['local_address']
        neighbor.peer_address = self.peerAddress
        neighbor.parse_routes = True

        # create dummy objects to fake exabgp into talking with us
        peer = FakePeer(neighbor)
        local = FakeLocal(self.localAddress)

        try:
            self.connection = Connection(peer, local, None, None)
        except Failure as e:
            raise InitiateConnectionException(repr(e))

        self.log.debug("Instantiate ExaBGP Protocol")
        self.protocol = MyBGPProtocol(peer, self.connection)
        self.protocol.connect()

        # this is highly similar to exabgp.network.peer._run

        o = self.protocol.new_open(False, False, self.config,
                                   ExaBGPPeerWorker.enabledFamilies)

        self.log.debug("Send open: [%s]", o)
        self.fsm.state = FSM.OpenSent

        count = 0
        self.log.debug("Wait for open...")
        while not self.shouldStop:
            # FIXME: we should time-out here, at some point
            message = self.protocol.read_open(o, None)

            count += 1
            if isinstance(message, NOP):
                # TODO(tmmorin): check compliance with BGP specs...
                if count > 20:
                    self.connection.close()
                    # FIXME: this should be moved to
                    # BGPPeerWorker in a more generic way
                    # (+ send Notify when needed)
                    raise OpenWaitTimeout("%ds" % int(20 * 0.5))
                sleep(0.5)
                continue

            self.log.debug("Read message: %s", message)

            if isinstance(message, Open):
                break
            else:
                self.log.error("Received unexpected message: %s", message)
                # FIXME

        if self.shouldStop:
            raise StoppedException()

        # An Open was received
        received_open = message

        self._setHoldTime(received_open.hold_time)

        # Hack to ease troubleshooting, have the real peer address appear in
        # the logs when fakerr is used
        if received_open.router_id.ip != self.peerAddress:
            self.log.info(
                "changing thread name from %s to BGP-x%s, based on"
                " the router-id advertized in Open (different from"
                " peerAddress == %s)", self.name, received_open.router_id.ip,
                self.peerAddress)
            self.name = "BGP-%s:%s" % (self.peerAddress,
                                       received_open.router_id.ip)

        try:
            mp_capabilities = received_open.capabilities[
                Capabilities.MULTIPROTOCOL_EXTENSIONS]
        except Exception:
            mp_capabilities = []

        # check that our peer advertized at least mpls_vpn and evpn
        # capabilities
        self._activeFamilies = []
        for (afi, safi) in (ExaBGPPeerWorker.enabledFamilies +
                            [(AFI(AFI.ipv4), SAFI(SAFI.rtc))]):
            if (afi, safi) not in mp_capabilities:
                self.log.warning("Peer does not advertise (%s,%s) capability",
                                 afi, safi)
            else:
                self.log.info(
                    "Family (%s,%s) successfully negotiated with peer %s", afi,
                    safi, self.peerAddress)
                self._activeFamilies.append((afi, safi))

        if len(self._activeFamilies) == 0:
            self.log.error("No family was negotiated for VPN routes")

        # proceed with the BGP session

        self.connection.io.setblocking(1)

        self.enqueue(SendKeepAlive)

        self.fsm.state = FSM.OpenConfirm

        self.rtc_active = False

        if self.config['enable_rtc']:
            if (AFI(AFI.ipv4), SAFI(SAFI.rtc)) in mp_capabilities:
                self.log.info("RTC successfully enabled with peer %s",
                              self.peerAddress)
                self.rtc_active = True
            else:
                self.log.warning(
                    "enable_rtc True but peer not configured for RTC")

    def _toEstablished(self):
        BGPPeerWorker._toEstablished(self)

        if self.rtc_active:
            # subscribe to RTC routes, to be able to propagate them from
            # internal workers to this peer
            self._subscribe(AFI(AFI.ipv4), SAFI(SAFI.rtc))
        else:
            # if we don't use RTC with our peer, we need to see events for all
            # routes of all active families so that we can send them to the peer
            for (afi, safi) in self._activeFamilies:
                self._subscribe(afi, safi)

    def _receiveLoopFun(self):

        select.select([self.connection.io], [], [], 5)

        if not self._queue.empty():
            if self._stopLoops.isSet():
                self.log.info("stopLoops is set -> Close connection and"
                              " Finish receive Loop")
                self.connection.close()
                return 0

        try:
            message = self.protocol.read_message()
        except Notification as e:
            self.log.error("Peer notified us about an error: %s", e)
            return 2
        except Failure as e:
            self.log.warning("Protocol failure: %s", e)
            return 2
        except socket.error as e:
            self.log.warning("Socket error: %s", e)
            return 2
        except Exception as e:
            self.log.error("Error while reading BGP message: %s", e)
            raise

        if message.TYPE == NOP.TYPE:
            # we arrived here because select call timed-out
            return 1
        elif message.TYPE == Update.TYPE:
            if (self.fsm.state != FSM.Established):
                raise Exception("Update received but not in Established state")
            pass  # see below
        elif message.TYPE == KeepAlive.TYPE:
            if (self.fsm.state == FSM.OpenConfirm):
                self._toEstablished()
            self.enqueue(KeepAliveReceived)
            self.log.debug("Received message: %s", message)
        else:
            self.log.warning("Received unexpected message: %s", message)

        if isinstance(message, Update):
            self.log.info("Received message: UPDATE...")
            if message.routes:
                for route in message.routes:
                    self._processReceivedRoute(route)

        return 1

    def _processReceivedRoute(self, route):
        self.log.info("Received route: %s", route)

        rts = []
        if AttributeID.EXTENDED_COMMUNITY in route.attributes:
            rts = [
                ecom for ecom in route.attributes[
                    AttributeID.EXTENDED_COMMUNITY].communities
                if isinstance(ecom, RouteTarget)
            ]

            if not rts:
                raise Exception("Unable to find any Route Targets"
                                "in the received route")

        routeEntry = self._newRouteEntry(route.nlri.afi, route.nlri.safi, rts,
                                         route.nlri, route.attributes)

        if route.action == "announce":
            self._pushEvent(RouteEvent(RouteEvent.ADVERTISE, routeEntry))
        else:
            self._pushEvent(RouteEvent(RouteEvent.WITHDRAW, routeEntry))

        # TODO(tmmorin): move the RTC code out of the peer-specific code
        if (route.nlri.afi, route.nlri.safi) == (AFI(AFI.ipv4),
                                                 SAFI(SAFI.rtc)):
            self.log.info("Received an RTC route")

            if route.nlri.route_target is None:
                self.log.info("Received RTC is a wildcard")

            # the semantics of RTC routes do not distinguish between AFI/SAFIs:
            # if our peer subscribed to a Route Target, we need to send it all
            # routes of any AFI/SAFI carrying this Route Target.
            for (afi, safi) in self._activeFamilies:
                if (afi, safi) != (AFI(AFI.ipv4), SAFI(SAFI.rtc)):
                    if route.action == "announce":
                        self._subscribe(afi, safi, route.nlri.route_target)
                    else:  # withdraw
                        self._unsubscribe(afi, safi, route.nlri.route_target)

    def _send(self, data):
        # (error if state not the right one for sending updates)
        self.log.debug("Sending %d bytes on socket to peer %s", len(data),
                       self.peerAddress)
        try:
            self.connection.write(data)
        except Exception as e:
            self.log.error("Was not able to send data: %s", e)

    def _keepAliveMessageData(self):
        return KeepAlive().message()

    def _updateForRouteEvent(self, event):
        r = Route(event.routeEntry.nlri)
        if event.type == event.ADVERTISE:
            self.log.info("Generate UPDATE message: %s", r)
            r.attributes = event.routeEntry.attributes
            try:
                return Update([r]).update(False, self.config['my_as'],
                                          self.config['my_as'])
            except Exception as e:
                self.log.error(
                    "Exception while generating message for "
                    "route %s: %s", r, e)
                self.log.warning("%s", traceback.format_exc())
                return ''

        elif event.type == event.WITHDRAW:
            self.log.info("Generate WITHDRAW message: %s", r)
            return Update([r]).withdraw(False, self.config['my_as'],
                                        self.config['my_as'])

    def stop(self):
        if self.connection is not None:
            self.connection.close()
        BGPPeerWorker.stop(self)

    # Looking Glass ###############

    def getLookingGlassLocalInfo(self, pathPrefix):
        return {
            "peeringAddresses": {
                "peerAddress": self.peerAddress,
                "localAddress": self.localAddress
            },
            "as_info": {
                "local": self.config['my_as'],
                "peer": self.config['peer_as']
            },
            "rtc": {
                "active": self.rtc_active,
                "enabled": self.config['enable_rtc']
            },
            "active_families": [repr(f) for f in self._activeFamilies],
        }
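
The RTC handling at the end of _processReceivedRoute amounts to the following fan-out: for every negotiated non-RTC family, subscribe to (or unsubscribe from) the Route Target the peer just announced or withdrew. A standalone sketch of that decision, with the subscribe calls replaced by prints and plain tuples standing in for exabgp's AFI/SAFI objects:

RTC_FAMILY = ('ipv4', 'rtc')

def rtc_fanout(action, route_target, active_families):
    # propagate an RTC announce/withdraw to every negotiated non-RTC family
    for family in active_families:
        if family == RTC_FAMILY:
            continue
        if action == 'announce':
            print('subscribe', family, route_target)
        else:  # withdraw
            print('unsubscribe', family, route_target)

rtc_fanout('announce', 'target:64512:1',
           [('ipv4', 'mpls_vpn'), ('l2vpn', 'evpn'), RTC_FAMILY])
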
    def _initiateConnection(self):
        self.log.debug("Initiate ExaBGP connection to %s from %s",
                       self.peerAddress, self.localAddress)

        self.rtc_active = False

        neighbor = Neighbor()
        neighbor.router_id = RouterID(self.config['local_address'])
        neighbor.local_as = self.config['my_as']
        neighbor.peer_as = self.config['peer_as']
        neighbor.local_address = self.config['local_address']
        neighbor.peer_address = self.peerAddress
        neighbor.parse_routes = True

        # create dummy objects to fake exabgp into talking with us
        peer = FakePeer(neighbor)
        local = FakeLocal(self.localAddress)

        try:
            self.connection = Connection(peer, local, None, None)
        except Failure as e:
            raise InitiateConnectionException(repr(e))

        self.log.debug("Instantiate ExaBGP Protocol")
        self.protocol = MyBGPProtocol(peer, self.connection)
        self.protocol.connect()

        # this is highly similar to exabgp.network.peer._run

        o = self.protocol.new_open(False, False, self.config,
                                   ExaBGPPeerWorker.enabledFamilies)

        self.log.debug("Send open: [%s]", o)
        self.fsm.state = FSM.OpenSent

        count = 0
        self.log.debug("Wait for open...")
        while not self.shouldStop:
            # FIXME: we should time-out here, at some point
            message = self.protocol.read_open(o, None)

            count += 1
            if isinstance(message, NOP):
                # TODO(tmmorin): check compliance with BGP specs...
                if count > 20:
                    self.connection.close()
                    # FIXME: this should be moved to
                    # BGPPeerWorker in a more generic way
                    # (+ send Notify when needed)
                    raise OpenWaitTimeout("%ds" % int(20 * 0.5))
                sleep(0.5)
                continue

            self.log.debug("Read message: %s", message)

            if isinstance(message, Open):
                break
            else:
                self.log.error("Received unexpected message: %s", message)
                # FIXME

        if self.shouldStop:
            raise StoppedException()

        # An Open was received
        received_open = message

        self._setHoldTime(received_open.hold_time)

        # Hack to ease troubleshooting, have the real peer address appear in
        # the logs when fakerr is used
        if received_open.router_id.ip != self.peerAddress:
            self.log.info(
                "changing thread name from %s to BGP-x%s, based on"
                " the router-id advertized in Open (different from"
                " peerAddress == %s)", self.name, received_open.router_id.ip,
                self.peerAddress)
            self.name = "BGP-%s:%s" % (self.peerAddress,
                                       received_open.router_id.ip)

        try:
            mp_capabilities = received_open.capabilities[
                Capabilities.MULTIPROTOCOL_EXTENSIONS]
        except Exception:
            mp_capabilities = []

        # check that our peer advertized at least mpls_vpn and evpn
        # capabilities
        self._activeFamilies = []
        for (afi, safi) in (ExaBGPPeerWorker.enabledFamilies +
                            [(AFI(AFI.ipv4), SAFI(SAFI.rtc))]):
            if (afi, safi) not in mp_capabilities:
                self.log.warning("Peer does not advertise (%s,%s) capability",
                                 afi, safi)
            else:
                self.log.info(
                    "Family (%s,%s) successfully negotiated with peer %s", afi,
                    safi, self.peerAddress)
                self._activeFamilies.append((afi, safi))

        if len(self._activeFamilies) == 0:
            self.log.error("No family was negotiated for VPN routes")

        # proceed with the BGP session

        self.connection.io.setblocking(1)

        self.enqueue(SendKeepAlive)

        self.fsm.state = FSM.OpenConfirm

        self.rtc_active = False

        if self.config['enable_rtc']:
            if (AFI(AFI.ipv4), SAFI(SAFI.rtc)) in mp_capabilities:
                self.log.info("RTC successfully enabled with peer %s",
                              self.peerAddress)
                self.rtc_active = True
            else:
                self.log.warning(
                    "enable_rtc True but peer not configured for RTC")