Code Example #1
File: configuration.py  Project: leleobhz/exabgp
class _Configuration(object):
    def __init__(self):
        self.processes = {}
        self.neighbors = {}
        self.logger = Logger()

    def inject_change(self, peers, change):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                if change.nlri.family() in self.neighbors[neighbor].families():
                    self.neighbors[neighbor].rib.outgoing.insert_announced(
                        change)
                else:
                    self.logger.configuration(
                        'the route family is not configured on neighbor',
                        'error')
                    result = False
        return result

    def inject_eor(self, peers, family):
        result = False
        for neighbor in self.neighbors:
            if neighbor in peers:
                result = True
                self.neighbors[neighbor].eor.append(family)
        return result

    def inject_operational(self, peers, operational):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                if operational.family() in self.neighbors[neighbor].families():
                    if operational.name == 'ASM':
                        self.neighbors[neighbor].asm[
                            operational.family()] = operational
                    self.neighbors[neighbor].messages.append(operational)
                else:
                    self.logger.configuration(
                        'the route family is not configured on neighbor',
                        'error')
                    result = False
        return result

    def inject_refresh(self, peers, refresh):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                family = (refresh.afi, refresh.safi)
                if family in self.neighbors[neighbor].families():
                    self.neighbors[neighbor].refresh.append(
                        refresh.__class__(refresh.afi, refresh.safi))
                else:
                    result = False
        return result
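
A minimal usage sketch of the injection pattern above. Everything here except the _Configuration class itself is a hypothetical stand-in (FakeLogger, FakeRIB, FakeNeighbor, FakeNLRI, FakeChange); the real exabgp Neighbor, Change and Logger classes carry much more state than shown.

class FakeLogger(object):
    # stand-in for exabgp.logger.Logger: just print the message and level
    def configuration(self, message, level='info'):
        print('%s: %s' % (level, message))

class FakeRIB(object):
    # stand-in for the neighbor RIB: record announced changes in a list
    def __init__(self):
        self.outgoing = self
        self.announced = []
    def insert_announced(self, change):
        self.announced.append(change)

class FakeNeighbor(object):
    def __init__(self, families):
        self._families = families
        self.rib = FakeRIB()
    def families(self):
        return self._families

class FakeNLRI(object):
    def __init__(self, family):
        self._family = family
    def family(self):
        return self._family

class FakeChange(object):
    def __init__(self, family):
        self.nlri = FakeNLRI(family)

class DemoConfiguration(_Configuration):
    # override __init__ so the sketch does not need a real Logger()
    def __init__(self):
        self.processes = {}
        self.neighbors = {}
        self.logger = FakeLogger()

config = DemoConfiguration()
config.neighbors = {
    'peer-1': FakeNeighbor([('ipv4', 'unicast')]),
    'peer-2': FakeNeighbor([('ipv6', 'unicast')]),
}
change = FakeChange(('ipv4', 'unicast'))

# peer-1 accepts the change, peer-2 does not announce the ipv4 unicast family
print(config.inject_change(['peer-1', 'peer-2'], change))      # False
print(len(config.neighbors['peer-1'].rib.outgoing.announced))  # 1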
Code Example #2
File: bgp.py  Project: PowerDNS/exabgp
def run(env, comment, configurations, pid=0):
    from exabgp.logger import Logger
    logger = Logger()

    if comment:
        logger.configuration(comment)

    if not env.profile.enable:
        Reactor(configurations).run()
        __exit(env.debug.memory, 0)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if not env.profile.file or env.profile.file == 'stdout':
        profile.run('Reactor(configurations).run()')
        __exit(env.debug.memory, 0)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        logger.reactor('profiling ....')
        pr = profile.Profile()
        pr.enable()
        try:
            Reactor(configurations).run()
        except Exception:
            raise
        finally:
            pr.disable()
            kprofile = lsprofcalltree.KCacheGrind(pr)

            with open(profile_name, 'w+') as write:
                kprofile.output(write)

            __exit(env.debug.memory, 0)
    else:
        logger.reactor("-" * len(notice))
        logger.reactor(notice)
        logger.reactor("-" * len(notice))
        Reactor(configurations).run()
        __exit(env.debug.memory, 0)
Code Example #3
File: bgp.py  Project: fobser/exabgp
def run (env, comment, configurations, pid=0):
	from exabgp.logger import Logger
	logger = Logger()

	if comment:
		logger.configuration(comment)

	if not env.profile.enable:
		ok = Reactor(configurations).run()
		__exit(env.debug.memory,0 if ok else 1)

	try:
		import cProfile as profile
	except ImportError:
		import profile

	if not env.profile.file or env.profile.file == 'stdout':
		ok = profile.run('Reactor(configurations).run()')
		__exit(env.debug.memory,0 if ok else 1)

	if pid:
		profile_name = "%s-pid-%d" % (env.profile.file,pid)
	else:
		profile_name = env.profile.file

	notice = ''
	if os.path.isdir(profile_name):
		notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
	if os.path.exists(profile_name):
		notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

	if not notice:
		logger.reactor('profiling ....')
		profiler = profile.Profile()
		profiler.enable()
		try:
			ok = Reactor(configurations).run()
		except Exception:
			raise
		finally:
			profiler.disable()
			kprofile = lsprofcalltree.KCacheGrind(profiler)

			with open(profile_name, 'w+') as write:
				kprofile.output(write)

			__exit(env.debug.memory,0 if ok else 1)
	else:
		logger.reactor("-"*len(notice))
		logger.reactor(notice)
		logger.reactor("-"*len(notice))
		Reactor(configurations).run()
		__exit(env.debug.memory,1)
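
For reference, a standalone sketch of the cProfile pattern used by the two run() variants above, with a placeholder workload instead of Reactor(configurations).run() and a standard-library pstats report in place of the lsprofcalltree/KCacheGrind export:

import cProfile
import io
import pstats

def workload():
    # placeholder for Reactor(configurations).run()
    return sum(i * i for i in range(100000))

profiler = cProfile.Profile()
profiler.enable()
try:
    workload()
finally:
    profiler.disable()

# print the ten most expensive calls by cumulative time
stream = io.StringIO()
pstats.Stats(profiler, stream=stream).sort_stats('cumulative').print_stats(10)
print(stream.getvalue())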
Code Example #4
File: configuration.py  Project: Exa-Networks/exabgp
class _Configuration(object):
    def __init__(self):
        self.processes = {}
        self.neighbors = {}
        self.logger = Logger()

    def inject_change(self, peers, change):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                if change.nlri.family() in self.neighbors[neighbor].families():
                    self.neighbors[neighbor].rib.outgoing.insert_announced(change)
                else:
                    self.logger.configuration("the route family is not configured on neighbor", "error")
                    result = False
        return result

    def inject_eor(self, peers, family):
        result = False
        for neighbor in self.neighbors:
            if neighbor in peers:
                result = True
                self.neighbors[neighbor].eor.append(family)
        return result

    def inject_operational(self, peers, operational):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                if operational.family() in self.neighbors[neighbor].families():
                    if operational.name == "ASM":
                        self.neighbors[neighbor].asm[operational.family()] = operational
                    self.neighbors[neighbor].messages.append(operational)
                else:
                    self.logger.configuration("the route family is not configured on neighbor", "error")
                    result = False
        return result

    def inject_refresh(self, peers, refresh):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                family = (refresh.afi, refresh.safi)
                if family in self.neighbors[neighbor].families():
                    self.neighbors[neighbor].refresh.append(refresh.__class__(refresh.afi, refresh.safi))
                else:
                    result = False
        return result
Code Example #5
File: bgp.py  Project: ofouzi/exabgp-1
def run(env, comment, configuration, pid=0):
    from exabgp.logger import Logger
    logger = Logger()

    if comment:
        logger.configuration(comment)

    # Set debug point ...
    pydevd.settrace()

    if not env.profile.enable:
        Reactor(configuration).run()
        __exit(env.debug.memory, 0)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if not env.profile.file or env.profile.file == 'stdout':
        profile.run('Reactor(configuration).run()')
        __exit(env.debug.memory, 0)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        logger.profile('profiling ....')
        profile.run('Reactor(configuration).run()', filename=profile_name)
        __exit(env.debug.memory, 0)
    else:
        logger.profile("-" * len(notice))
        logger.profile(notice)
        logger.profile("-" * len(notice))
        Reactor(configuration).run()
        __exit(env.debug.memory, 0)
Code Example #6
File: bgp.py  Project: SidhuG/exabgp
def run (env,comment,configuration,pid=0):
	from exabgp.logger import Logger
	logger = Logger()

	if comment:
		logger.configuration(comment)

	# Set debug point ...
	pydevd.settrace()

	if not env.profile.enable:
		Reactor(configuration).run()
		__exit(env.debug.memory,0)

	try:
		import cProfile as profile
	except ImportError:
		import profile

	if not env.profile.file or env.profile.file == 'stdout':
		profile.run('Reactor(configuration).run()')
		__exit(env.debug.memory,0)

	if pid:
		profile_name = "%s-pid-%d" % (env.profile.file,pid)
	else:
		profile_name = env.profile.file

	notice = ''
	if os.path.isdir(profile_name):
		notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
	if os.path.exists(profile_name):
		notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

	if not notice:
		logger.profile('profiling ....')
		profile.run('Reactor(configuration).run()',filename=profile_name)
		__exit(env.debug.memory,0)
	else:
		logger.profile("-"*len(notice))
		logger.profile(notice)
		logger.profile("-"*len(notice))
		Reactor(configuration).run()
		__exit(env.debug.memory,0)
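
And a sketch of the one-shot profile.run(..., filename=...) form used by this variant, again with a placeholder statement; the dump written to disk can be loaded back with pstats. (The pydevd.settrace() call above is the PyDev remote-debugger hook and is only useful when a PyDev debug server is listening.)

import cProfile
import os
import pstats
import tempfile

# placeholder statement instead of 'Reactor(configuration).run()'
statement = 'sum(i * i for i in range(100000))'

dump = os.path.join(tempfile.mkdtemp(), 'exabgp.pstats')
cProfile.run(statement, filename=dump)                    # write the binary profile data to a file

pstats.Stats(dump).sort_stats('tottime').print_stats(5)   # load it back and report the 5 costliest calls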
Code Example #7
File: bgp.py  Project: PowerDNS/exabgp
    # check the files only once we have parsed all the command line options and allowed them to run
    if options["<configuration>"]:
        for f in options["<configuration>"]:
            normalised = os.path.realpath(os.path.normpath(f))
            if os.path.isfile(normalised):
                configurations.append(normalised)
                continue
            if f.startswith('etc/exabgp'):
                normalised = os.path.join(folder, f[11:])
                if os.path.isfile(normalised):
                    configurations.append(normalised)
                    continue
            from exabgp.logger import Logger
            logger = Logger()
            logger.configuration(
                'one of the arguments passed as configuration is not a file (%s)'
                % f, 'error')
            sys.exit(1)

    else:
        print(usage)
        print('Environment values are:\n' + '\n'.join(
            ' - %s' % _ for _ in environment.default()))
        print('\nno configuration file provided')
        sys.exit(1)

    from exabgp.bgp.message.update.attribute.attribute import Attribute
    Attribute.caching = env.cache.attributes

    if env.debug.rotate or len(configurations) == 1:
        run(env, comment, configurations)
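
A small standalone sketch of the path-resolution check above: each candidate is normalised with os.path.realpath/os.path.normpath and kept only if it is a regular file. The etc/exabgp fallback and the environment.default() listing from the full bgp.py are left out, and resolve_configurations is a name made up for this sketch.

import os
import sys

def resolve_configurations(candidates):
    configurations = []
    for f in candidates:
        normalised = os.path.realpath(os.path.normpath(f))
        if os.path.isfile(normalised):
            configurations.append(normalised)
        else:
            # mirror the error path above: the argument is not a usable file
            sys.stderr.write('one of the arguments passed as configuration is not a file (%s)\n' % f)
    return configurations

print(resolve_configurations(['/etc/hosts', './missing.conf']))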
Code Example #8
File: loop.py  Project: leleobhz/exabgp
class Reactor(object):
    # [hex(ord(c)) for c in os.popen('clear').read()]
    clear = concat_bytes_i(
        character(int(c, 16))
        for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a'])

    def __init__(self, configurations):
        self.ip = environment.settings().tcp.bind
        self.port = environment.settings().tcp.port
        self.respawn = environment.settings().api.respawn

        self.max_loop_time = environment.settings().reactor.speed
        self.early_drop = environment.settings().daemon.drop

        self.logger = Logger()
        self.daemon = Daemon(self)
        self.processes = None
        self.listener = None
        self.configuration = Configuration(configurations)
        self.api = API(self)

        self.peers = {}
        self.route_update = False

        self._stopping = environment.settings().tcp.once
        self._shutdown = False
        self._reload = False
        self._reload_processes = False
        self._restart = False
        self._saved_pid = False
        self._pending = deque()
        self._running = None

        signal.signal(signal.SIGTERM, self.sigterm)
        signal.signal(signal.SIGHUP, self.sighup)
        signal.signal(signal.SIGALRM, self.sigalrm)
        signal.signal(signal.SIGUSR1, self.sigusr1)
        signal.signal(signal.SIGUSR2, self.sigusr2)

    def sigterm(self, signum, frame):
        self.logger.reactor('SIG TERM received - shutdown')
        self._shutdown = True

    def sighup(self, signum, frame):
        self.logger.reactor('SIG HUP received - shutdown')
        self._shutdown = True

    def sigalrm(self, signum, frame):
        self.logger.reactor('SIG ALRM received - restart')
        self._restart = True

    def sigusr1(self, signum, frame):
        self.logger.reactor('SIG USR1 received - reload configuration')
        self._reload = True

    def sigusr2(self, signum, frame):
        self.logger.reactor(
            'SIG USR2 received - reload configuration and processes')
        self._reload = True
        self._reload_processes = True

    def ready(self, sockets, ios, sleeptime=0):
        # never sleep a negative number of seconds (if the rounding is negative somewhere)
        # never sleep more than one second (should the clock change between two time.time() calls)
        sleeptime = min(max(0.0, sleeptime), 1.0)
        if not ios:
            time.sleep(sleeptime)
            return []
        try:
            read, _, _ = select.select(sockets + ios, [], [], sleeptime)
            return read
        except select.error as exc:
            errno, message = exc.args  # pylint: disable=W0633
            if errno not in error.block:
                raise exc
            return []
        except socket.error as exc:
            if exc.errno in error.fatal:
                raise exc
            return []

    def run(self):
        self.daemon.daemonise()

        # Make sure we create processes once we have closed the file descriptors
        # unfortunately, this must be done before reading the configuration file
        # so we can not do it with dropped privileges
        self.processes = Processes(self)

        # we have to read the configuration possibly with root privileges
        # as we need the MD5 information when we bind, and root is needed
        # to bind to a port < 1024

        # this is undesirable as :
        # - handling user generated data as root should be avoided
        # - we may not be able to reload the configuration once the privileges are dropped

        # but I can not see any way to avoid it

        if not self.load():
            return False

        try:
            self.listener = Listener()

            if self.ip:
                self.listener.listen(IP.create(self.ip), IP.create('0.0.0.0'),
                                     self.port, None, False, None)
                self.logger.reactor('Listening for BGP session(s) on %s:%d' %
                                    (self.ip, self.port))

            for neighbor in self.configuration.neighbors.values():
                if neighbor.listen:
                    self.listener.listen(neighbor.md5_ip,
                                         neighbor.peer_address,
                                         neighbor.listen,
                                         neighbor.md5_password,
                                         neighbor.md5_base64, neighbor.ttl_in)
                    self.logger.reactor(
                        'Listening for BGP session(s) on %s:%d%s' %
                        (neighbor.md5_ip, neighbor.listen,
                         ' with MD5' if neighbor.md5_password else ''))
        except NetworkError as exc:
            self.listener = None
            if os.geteuid() != 0 and self.port <= 1024:
                self.logger.reactor(
                    'Can not bind to %s:%d, you may need to run ExaBGP as root'
                    % (self.ip, self.port), 'critical')
            else:
                self.logger.reactor(
                    'Can not bind to %s:%d (%s)' %
                    (self.ip, self.port, str(exc)), 'critical')
            self.logger.reactor(
                'unset exabgp.tcp.bind if you do not want to listen for incoming connections',
                'critical')
            self.logger.reactor(
                'and check that no other daemon is already binding to port %d'
                % self.port, 'critical')
            sys.exit(1)

        if not self.early_drop:
            self.processes.start()

        if not self.daemon.drop_privileges():
            self.logger.reactor(
                'Could not drop privileges to \'%s\' refusing to run as root' %
                self.daemon.user, 'critical')
            self.logger.reactor(
                'Set the environment value exabgp.daemon.user to change the unprivileged user',
                'critical')
            return

        if self.early_drop:
            self.processes.start()

        # This is required to make sure we can write in the log location as we now have dropped root privileges
        if not self.logger.restart():
            self.logger.reactor('Could not setup the logger, aborting',
                                'critical')
            return

        if not self.daemon.savepid():
            return

        # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
        reload_completed = True

        wait = environment.settings().tcp.delay
        if wait:
            sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
            self.logger.reactor('waiting for %d seconds before connecting' %
                                sleeptime)
            time.sleep(float(sleeptime))

        workers = {}
        peers = set()
        scheduled = False

        while True:
            try:
                finished = False
                start = time.time()
                end = start + self.max_loop_time

                if self._shutdown:
                    self._shutdown = False
                    self.shutdown()
                    break

                if self._reload and reload_completed:
                    self._reload = False
                    self.load()
                    self.processes.start(self._reload_processes)
                    self._reload_processes = False
                elif self._restart:
                    self._restart = False
                    self.restart()

                # We got some API routes to announce
                if self.route_update:
                    self.route_update = False
                    self.route_send()

                for peer in self.peers.keys():
                    peers.add(peer)

                while start < time.time() < end and not finished:
                    if self.peers:
                        for key in list(peers):
                            peer = self.peers[key]
                            action = peer.run()

                            # .run() returns an ACTION enum:
                            # * immediate if it wants to be called again
                            # * later if it should be called again but has no work atm
                            # * close if it is finished and is closing down, or restarting
                            if action == ACTION.CLOSE:
                                self.unschedule(key)
                                peers.discard(key)
                            # we are losing this peer, no point scheduling more process work
                            elif action == ACTION.LATER:
                                for io in peer.sockets():
                                    workers[io] = key
                                # no need to come back to it before a full cycle
                                peers.discard(key)

                    if not peers:
                        reload_completed = True

                    if self.listener:
                        for connection in self.listener.connected():
                            # found
                            # * False, no peer found for this TCP connection
                            # * True, peer found
                            # * None, conflict found for this TCP connection
                            found = False
                            for key in self.peers:
                                peer = self.peers[key]
                                neighbor = peer.neighbor
                                # XXX: FIXME: Inet can only be compared to Inet
                                if connection.local == str(
                                        neighbor.peer_address) and (
                                            neighbor.auto_discovery
                                            or connection.peer == str(
                                                neighbor.local_address)):
                                    if peer.incoming(connection):
                                        found = True
                                        break
                                    found = None
                                    break

                            if found:
                                self.logger.reactor(
                                    'accepted connection from  %s - %s' %
                                    (connection.local, connection.peer))
                            elif found is False:
                                self.logger.reactor(
                                    'no session configured for  %s - %s' %
                                    (connection.local, connection.peer))
                                connection.notification(
                                    6, 3, 'no session configured for the peer')
                                connection.close()
                            elif found is None:
                                self.logger.reactor(
                                    'connection refused (already connected to the peer) %s - %s'
                                    % (connection.local, connection.peer))
                                connection.notification(
                                    6, 5, 'could not accept the connection')
                                connection.close()

                    scheduled = self.schedule()
                    finished = not peers and not scheduled

                # the RFC states that we MUST NOT send more than one KEEPALIVE / sec
                # And doing less could cause the session to drop

                if finished:
                    for io in self.ready(list(peers), self.processes.fds(),
                                         end - time.time()):
                        if io in workers:
                            peers.add(workers[io])
                            del workers[io]

                if self._stopping and not self.peers.keys():
                    break

            except KeyboardInterrupt:
                while True:
                    try:
                        self._shutdown = True
                        self.logger.reactor('^C received')
                        break
                    except KeyboardInterrupt:
                        pass
            # socket.error is a subclass of IOError (so catch it first)
            except socket.error:
                try:
                    self._shutdown = True
                    self.logger.reactor('socket error received', 'warning')
                    break
                except KeyboardInterrupt:
                    pass
            except IOError:
                while True:
                    try:
                        self._shutdown = True
                        self.logger.reactor(
                            'I/O Error received, most likely ^C during IO',
                            'warning')
                        break
                    except KeyboardInterrupt:
                        pass
            except SystemExit:
                try:
                    self._shutdown = True
                    self.logger.reactor('exiting')
                    break
                except KeyboardInterrupt:
                    pass
            except ProcessError:
                try:
                    self._shutdown = True
                    self.logger.reactor(
                        'Problem when sending message(s) to helper program, stopping',
                        'error')
                except KeyboardInterrupt:
                    pass
            except select.error:
                try:
                    self._shutdown = True
                    self.logger.reactor('problem using select, stopping',
                                        'error')
                except KeyboardInterrupt:
                    pass
                # from exabgp.leak import objgraph
                # print objgraph.show_most_common_types(limit=20)
                # import random
                # obj = objgraph.by_type('Route')[random.randint(0,2000)]
                # objgraph.show_backrefs([obj], max_depth=10)

    def shutdown(self):
        """terminate all the current BGP connections"""
        self.logger.reactor('performing shutdown')
        if self.listener:
            self.listener.stop()
            self.listener = None
        for key in self.peers.keys():
            self.peers[key].stop()
        self.processes.terminate()
        self.daemon.removepid()
        self._stopping = True

    def load(self):
        """reload the configuration and send to the peer the route which changed"""
        self.logger.reactor('performing reload of exabgp %s' % version)

        reloaded = self.configuration.reload()

        if not reloaded:
            #
            # Careful: the string below is used by the QA code to check for success or failure
            self.logger.configuration(
                'problem with the configuration file, no change done', 'error')
            # Careful: the string above is used by the QA code to check for success or failure
            #
            self.logger.configuration(str(self.configuration.error), 'error')
            return False

        for key, peer in self.peers.items():
            if key not in self.configuration.neighbors:
                self.logger.reactor('removing peer: %s' % peer.neighbor.name())
                peer.stop()

        for key, neighbor in self.configuration.neighbors.items():
            # new peer
            if key not in self.peers:
                self.logger.reactor('new peer: %s' % neighbor.name())
                peer = Peer(neighbor, self)
                self.peers[key] = peer
            # modified peer
            elif self.peers[key].neighbor != neighbor:
                self.logger.reactor(
                    'peer definition change, establishing a new connection for %s'
                    % str(key))
                self.peers[key].reestablish(neighbor)
            # same peer but perhaps not the routes
            else:
                # finding what route changed and sending the delta is not obvious
                self.logger.reactor(
                    'peer definition identical, updating peer routes if required for %s'
                    % str(key))
                self.peers[key].reconfigure(neighbor)
        self.logger.configuration('loaded new configuration successfully',
                                  'info')

        return True

    def schedule(self):
        try:
            # read at least one message per process if there are any and parse it
            for service, command in self.processes.received():
                self.api.text(self, service, command)

            # if we have nothing to do, return or save the work
            if not self._running:
                if not self._pending:
                    return False
                self._running, name = self._pending.popleft()
                self.logger.reactor('callback | installing %s' % name)

            if self._running:
                # run it
                try:
                    self.logger.reactor('callback | running')
                    six.next(self._running)  # run
                    # should raise StopIteration in most cases
                    # and prevents us from having to run twice to run one command
                    six.next(self._running)  # run
                except StopIteration:
                    self._running = None
                    self.logger.reactor('callback | removing')
                return True

        except StopIteration:
            pass
        except KeyboardInterrupt:
            self._shutdown = True
            self.logger.reactor('^C received', 'error')

    def route_send(self):
        """the process ran and we need to figure what routes to changes"""
        self.logger.reactor('performing dynamic route update')
        for key in self.configuration.neighbors.keys():
            self.peers[key].send_new()
        self.logger.reactor('updated peers dynamic routes successfully')

    def restart(self):
        """kill the BGP session and restart it"""
        self.logger.reactor('performing restart of exabgp %s' % version)
        self.configuration.reload()

        for key in self.peers.keys():
            if key not in self.configuration.neighbors.keys():
                neighbor = self.configuration.neighbors[key]
                self.logger.reactor('removing Peer %s' % neighbor.name())
                self.peers[key].stop()
            else:
                self.peers[key].reestablish()
        self.processes.terminate()
        self.processes.start()

    def unschedule(self, peer):
        if peer in self.peers:
            del self.peers[peer]

    def answer(self, service, string):
        self.processes.write(service, string)
        self.logger.reactor('responding to %s : %s' %
                            (service, string.replace('\n', '\\n')))

    def api_shutdown(self):
        self._shutdown = True
        self._pending = deque()
        self._running = None

    def api_reload(self):
        self._reload = True
        self._pending = deque()
        self._running = None

    def api_restart(self):
        self._restart = True
        self._pending = deque()
        self._running = None

    @staticmethod
    def match_neighbor(description, name):
        for string in description:
            if re.search(r'(^|[\s])%s($|[\s,])' % re.escape(string),
                         name) is None:
                return False
        return True

    def match_neighbors(self, descriptions):
        """return the sublist of peers matching the description passed, or None if no description is given"""
        if not descriptions:
            return self.peers.keys()

        returned = []
        for key in self.peers:
            for description in descriptions:
                if Reactor.match_neighbor(description, key):
                    if key not in returned:
                        returned.append(key)
        return returned

    def nexthops(self, peers):
        return dict(
            (peer, self.peers[peer].neighbor.local_address) for peer in peers)

    def plan(self, callback, name):
        self._pending.append((callback, name))
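
To illustrate the neighbor matching used by Reactor.match_neighbor and match_neighbors above, here is a standalone copy of the same word-boundary test against made-up neighbor name strings (the exact format produced by Neighbor.name() is not shown in this excerpt, so the names below are only illustrative):

import re

def match_neighbor(description, name):
    # every token of the description must appear as a whole word in the name
    for string in description:
        if re.search(r'(^|[\s])%s($|[\s,])' % re.escape(string), name) is None:
            return False
    return True

# made-up neighbor name strings, only for the sake of the example
names = [
    'neighbor 192.0.2.1 local-ip 198.51.100.1 local-as 65000 peer-as 65001',
    'neighbor 192.0.2.2 local-ip 198.51.100.1 local-as 65000 peer-as 65002',
]

print([n for n in names if match_neighbor(['192.0.2.1'], n)])       # first name only
print([n for n in names if match_neighbor(['65000', '65002'], n)])  # second name only
print([n for n in names if match_neighbor(['192.0.2'], n)])         # no whole-word match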
Code Example #9
File: __init__.py  Project: slabakov/exabgp
class Configuration (object):
	def __init__ (self, configurations, text=False):
		self.api_encoder = environment.settings().api.encoder
		self.fifo = environment.settings().api.file

		self.logger = Logger()
		self._configurations = configurations

		self.error = Error()
		self.tokens = Tokeniser(self.error,self.logger)
		self.neighbor = ParseNeighbor(self.error)
		self.family = ParseFamily(self.error)
		self.route = ParseRoute(self.error)
		self.flow = ParseFlow(self.error,self.logger)
		self.l2vpn = ParseL2VPN(self.error)
		self.process = ParseProcess(self.error)

		self._dispatch_neighbor = {
			'description':   self.neighbor.description,
			'router-id':     self.neighbor.router_id,
			'host-name':     self.neighbor.hostname,
			'domain-name':   self.neighbor.domainname,
			'local-address': self.neighbor.ip,
			'local-as':      self.neighbor.asn,
			'peer-as':       self.neighbor.asn,
			'passive':       self.neighbor.passive,
			'listen':        self.neighbor.listen,
			'hold-time':     self.neighbor.holdtime,
			'md5':           self.neighbor.md5,
			'ttl-security':  self.neighbor.ttl,
			'group-updates': self.neighbor.groupupdate,
			'adj-rib-out':   self.neighbor.adjribout,
			'auto-flush':    self.neighbor.autoflush,
		}

		self._dispatch_family = {
			'ipv4':    self.family.ipv4,
			'ipv6':    self.family.ipv6,
			'l2vpn':   self.family.l2vpn,
			'minimal': self.family.minimal,
			'all':     self.family.all,
		}

		self._dispatch_capability = {
			# deprecated
			'route-refresh':    self.neighbor.capability.refresh,
			'graceful-restart': self.neighbor.capability.gracefulrestart,
			'multi-session':    self.neighbor.capability.multisession,
			'add-path':         self.neighbor.capability.addpath,
			'aigp':             self.neighbor.capability.aigp,
			'operational':      self.neighbor.capability.operational,
			'asn4':             self.neighbor.capability.asn4,
		}

		self._dispatch_route = {
			'origin':              self.route.origin,
			'as-path':             self.route.aspath,
			# For legacy with version 2.0.x
			'as-sequence':         self.route.aspath,
			'med':                 self.route.med,
			'aigp':                self.route.aigp,
			'next-hop':            self.route.next_hop,
			'local-preference':    self.route.local_preference,
			'atomic-aggregate':    self.route.atomic_aggregate,
			'aggregator':          self.route.aggregator,
			'path-information':    self.route.path_information,
			'originator-id':       self.route.originator_id,
			'cluster-list':        self.route.cluster_list,
			'split':               self.route.split,
			'label':               self.route.label,
			'rd':                  self.route.rd,
			'route-distinguisher': self.route.rd,
			'watchdog':            self.route.watchdog,
			# withdrawn is here to not break legacy code
			'withdraw':            self.route.withdraw,
			'withdrawn':           self.route.withdraw,
			'name':                self.route.name,
			'community':           self.route.community,
			'extended-community':  self.route.extended_community,
			'attribute':           self.route.generic_attribute,
		}

		self._dispatch_flow = {
			'rd':                  self.route.rd,
			'route-distinguisher': self.route.rd,
			'next-hop':            self.flow.next_hop,
			'source':              self.flow.source,
			'source-ipv4':         self.flow.source,
			'destination':         self.flow.destination,
			'destination-ipv4':    self.flow.destination,
			'port':                self.flow.anyport,
			'source-port':         self.flow.source_port,
			'destination-port':    self.flow.destination_port,
			'protocol':            self.flow.protocol,
			'next-header':         self.flow.next_header,
			'tcp-flags':           self.flow.tcp_flags,
			'icmp-type':           self.flow.icmp_type,
			'icmp-code':           self.flow.icmp_code,
			'fragment':            self.flow.fragment,
			'dscp':                self.flow.dscp,
			'traffic-class':       self.flow.traffic_class,
			'packet-length':       self.flow.packet_length,
			'flow-label':          self.flow.flow_label,
			'accept':              self.flow.accept,
			'discard':             self.flow.discard,
			'rate-limit':          self.flow.rate_limit,
			'redirect':            self.flow.redirect,
			'redirect-to-nexthop': self.flow.redirect_next_hop,
			'copy':                self.flow.copy,
			'mark':                self.flow.mark,
			'action':              self.flow.action,
			'community':           self.route.community,
			'extended-community':  self.route.extended_community,
		}

		self._dispatch_vpls = {
			'endpoint':            self.l2vpn.vpls_endpoint,
			'offset':              self.l2vpn.vpls_offset,
			'size':                self.l2vpn.vpls_size,
			'base':                self.l2vpn.vpls_base,
			'origin':              self.route.origin,
			'as-path':             self.route.aspath,
			'med':                 self.route.med,
			'next-hop':            self.route.next_hop,
			'local-preference':    self.route.local_preference,
			'originator-id':       self.route.originator_id,
			'cluster-list':        self.route.cluster_list,
			'rd':                  self.route.rd,
			'route-distinguisher': self.route.rd,
			'withdraw':            self.route.withdraw,
			'withdrawn':           self.route.withdraw,
			'name':                self.route.name,
			'community':           self.route.community,
			'extended-community':  self.route.extended_community,
		}

		self._clear()

	def _clear (self):
		self.processes = {}
		self.neighbors = {}
		self._neighbors = {}

		self.error.clear()

		self._scope = []
		self._location = ['root']

		self.tokens.clear()
		self.error.clear()
		self.neighbor.clear()
		self.family.clear()
		self.route.clear()
		self.flow.clear()
		self.l2vpn.clear()
		self.process.clear()

	# Public Interface

	def reload (self):
		try:
			return self._reload()
		except KeyboardInterrupt:
			return self.error.set('configuration reload aborted by ^C or SIGINT')
		except Exception:
			# unhandled configuration parsing issue
			raise

	def _reload (self):
		# taking the first configuration available (FIFO buffer)
		fname = self._configurations.pop(0)
		self.process.configuration(fname)
		self._configurations.append(fname)

		# storing the routes associated with each peer so we can find what changed
		backup_changes = {}
		for neighbor in self._neighbors:
			backup_changes[neighbor] = self._neighbors[neighbor].changes

		# clearing the current configuration to be able to re-parse it
		self._clear()

		if not self.tokens.set_file(fname):
			return False

		# parsing the configuration
		r = False
		while not self.tokens.finished:
			r = self._dispatch(
				self._scope,'configuration',
				['group','neighbor'],
				[]
			)
			if r is False:
				break

		# handling possible parsing errors
		if r not in [True,None]:
			return self.error.set("\nsyntax error in section %s\nline %d: %s\n\n%s" % (self._location[-1],self.tokens.number,' '.join(self.tokens.line),self.error))

		# parsing was successful, assigning the result
		self.neighbors = self._neighbors

		# installing in the neighbor what were its previous routes so we can
		# add/withdraw what needs to be
		for neighbor in self.neighbors:
			self.neighbors[neighbor].backup_changes = backup_changes.get(neighbor,[])

		# we are not really running the program, just want to ....
		if environment.settings().debug.route:
			from exabgp.configuration.check import check_message
			if check_message(self.neighbors,environment.settings().debug.route):
				sys.exit(0)
			sys.exit(1)

		# we are not really running the program, just want to check the configuration validity
		if environment.settings().debug.selfcheck:
			from exabgp.configuration.check import check_neighbor
			if check_neighbor(self.neighbors):
				sys.exit(0)
			sys.exit(1)

		return True

	# XXX: FIXME: move this from here to the reactor (or whatever will manage command from user later)
	def change_to_peers (self, change, peers):
		result = True
		for neighbor in self.neighbors:
			if neighbor in peers:
				if change.nlri.family() in self.neighbors[neighbor].families():
					self.neighbors[neighbor].rib.outgoing.insert_announced(change)
				else:
					self.logger.configuration('the route family is not configured on neighbor','error')
					result = False
		return result

	# XXX: FIXME: move this from here to the reactor (or whatever will manage command from user later)
	def eor_to_peers (self, family, peers):
		result = False
		for neighbor in self.neighbors:
			if neighbor in peers:
				result = True
				self.neighbors[neighbor].eor.append(family)
		return result

	# XXX: FIXME: move this from here to the reactor (or whatever will manage command from user later)
	def operational_to_peers (self, operational, peers):
		result = True
		for neighbor in self.neighbors:
			if neighbor in peers:
				if operational.family() in self.neighbors[neighbor].families():
					if operational.name == 'ASM':
						self.neighbors[neighbor].asm[operational.family()] = operational
					self.neighbors[neighbor].messages.append(operational)
				else:
					self.logger.configuration('the route family is not configured on neighbor','error')
					result = False
		return result

	# XXX: FIXME: move this from here to the reactor (or whatever will manage command from user later)
	def refresh_to_peers (self, refresh, peers):
		result = True
		for neighbor in self.neighbors:
			if neighbor in peers:
				family = (refresh.afi,refresh.safi)
				if family in self.neighbors[neighbor].families():
					self.neighbors[neighbor].refresh.append(refresh.__class__(refresh.afi,refresh.safi))
				else:
					result = False
		return result

	# Tokenisation

	def number (self):
		return self._number

	# Flow control ......................

	# name is not used yet but will come really handy if we have name collision :D
	def _dispatch (self, scope, name, multi, single, location=None):
		if location:
			self._location = location
			self.flow.clear()
		try:
			tokens = self.tokens.next()
		except IndexError:
			return self.error.set('configuration file incomplete (most likely missing })')
		self.logger.configuration("parsing | %-13s | '%s'" % (name,"' '".join(tokens)))
		end = tokens[-1]
		if multi and end == '{':
			self._location.append(tokens[0])
			return self._multi_line(scope,name,tokens[:-1],multi)
		if single and end == ';':
			return self._single_line(scope,name,tokens[:-1],single)
		if end == '}':
			if len(self._location) == 1:
				return self.error.set('too many closing braces')
			self._location.pop(-1)
			return None
		return False

	def _multi_line (self, scope, name, tokens, valid):
		command = tokens[0]

		if valid and command not in valid:
			return self.error.set('option %s is not valid here' % command)

		if name == 'configuration':
			if command == 'neighbor':
				if self._multi_neighbor(scope,tokens[1:]):
					return self._make_neighbor(scope)
				return False
			if command == 'group':
				if len(tokens) != 2:
					return self.error.set('syntax: group <name> { <options> }')
				return self._multi_group(scope,tokens[1])

		if name == 'group':
			if command == 'neighbor':
				if self._multi_neighbor(scope,tokens[1:]):
					return self._make_neighbor(scope)
				return False
			if command == 'static':
				return self._multi_static(scope,tokens[1:])
			if command == 'flow':
				return self._multi_flow(scope,tokens[1:])
			if command == 'l2vpn':
				return self._multi_l2vpn(scope,tokens[1:])
			if command == 'process':
				return self._multi_process(scope,tokens[1:])
			if command == 'family':
				return self._multi_family(scope,tokens[1:])
			if command == 'capability':
				return self._multi_capability(scope,tokens[1:])
			if command == 'operational':
				return self._multi_operational(scope,tokens[1:])

		if name == 'neighbor':
			if command == 'static':
				return self._multi_static(scope,tokens[1:])
			if command == 'flow':
				return self._multi_flow(scope,tokens[1:])
			if command == 'l2vpn':
				return self._multi_l2vpn(scope,tokens[1:])
			if command == 'process':
				return self._multi_process(scope,tokens[1:])
			if command == 'family':
				return self._multi_family(scope,tokens[1:])
			if command == 'capability':
				return self._multi_capability(scope,tokens[1:])
			if command == 'operational':
				return self._multi_operational(scope,tokens[1:])

		if name == 'static':
			if command == 'route':
				if self._multi_static_route(scope,tokens[1:]):
					return self.route.check_static_route(scope)
				return False

		if name == 'flow':
			if command == 'route':
				if self._multi_flow_route(scope,tokens[1:]):
					return self._check_flow_route(scope)
				return False

		if name == 'l2vpn':
			if command == 'vpls':
				if self._multi_l2vpn_vpls(scope,tokens[1:]):
					return self._check_l2vpn_vpls(scope)
				return False

		if name == 'flow-route':
			if command == 'match':
				if self._multi_match(scope,tokens[1:]):
					return True
				return False
			if command == 'then':
				if self._multi_then(scope,tokens[1:]):
					return True
				return False

		if name == 'process':
			if command in ['send','receive']:
				if self._multi_api(scope,command,tokens[1:]):
					return True
				return False

		return False

	def _single_line (self, scope, name, tokens, valid):
		command = tokens[0]
		if valid and command not in valid:
			return self.error.set('invalid keyword "%s"' % command)

		elif name == 'route':
			if command in self._dispatch_route:
				if command in ('rd','route-distinguisher'):
					return self._dispatch_route[command](scope,tokens[1:],SAFI.mpls_vpn)
				else:
					return self._dispatch_route[command](scope,tokens[1:])

		elif name == 'l2vpn':
			if command in self._dispatch_vpls:
				if command in ('rd','route-distinguisher'):
					return self._dispatch_vpls[command](scope,tokens[1:],SAFI.vpls)
				else:
					return self._dispatch_vpls[command](scope,tokens[1:])

		elif name == 'flow-route':
			if command in self._dispatch_flow:
				if command in ('rd','route-distinguisher'):
					return self._dispatch_flow[command](scope,tokens[1:],SAFI.flow_vpn)
				else:
					return self._dispatch_flow[command](scope,tokens[1:])

		elif name == 'flow-match':
			if command in self._dispatch_flow:
				return self._dispatch_flow[command](scope,tokens[1:])

		elif name == 'flow-then':
			if command in self._dispatch_flow:
				return self._dispatch_flow[command](scope,tokens[1:])

		if name in ('neighbor','group'):
			if command in self._dispatch_neighbor:
				return self._dispatch_neighbor[command](scope,command,tokens[1:])

		elif name == 'family':
			if command in self._dispatch_family:
				return self._dispatch_family[command](scope,tokens[1:])

		elif name == 'capability':
			if command in self._dispatch_capability:
				return self._dispatch_capability[command](scope,command,tokens[1:])

		elif name == 'process':
			if command == 'run':
				return self.process.run(scope,'process-run',tokens[1:])
			if command == 'encoder':
				return self.process.encoder(scope,'encoder',tokens[1:])

			if command == 'neighbor-changes':
				return self.process.command(scope,'neighbor-changes',tokens[1:])

		elif name in ['send','receive']:  # process / send

			if command in ['packets','parsed','consolidate']:
				return self.process.command(scope,'%s-%s' % (name,command),tokens[1:])

			for message in Message.CODE.MESSAGES:
				if command == message.SHORT:
					return self.process.command(scope,'%s-%d' % (name,message),tokens[1:])

		elif name == 'static':
			if command == 'route':
				return self._single_static_route(scope,tokens[1:])

		elif name == 'l2vpn':
			if command == 'vpls':
				return self._single_l2vpn_vpls(scope,tokens[1:])

		elif name == 'operational':
			if command == 'asm':
				return self._single_operational_asm(scope,tokens[1])
			# it does not make sense to have adm

		return False

	# Programs used to control exabgp

	def _multi_process (self, scope, tokens):
		while True:
			r = self._dispatch(
				scope,'process',
				['send','receive'],
				[
					'run','encoder',
					'neighbor-changes',
				]
			)
			if r is False:
				return False
			if r is None:
				break

		name = tokens[0] if len(tokens) >= 1 else 'conf-only-%s' % str(time.time())[-6:]
		self.processes.setdefault(name,{})['neighbor'] = scope[-1]['peer-address'] if 'peer-address' in scope[-1] else '*'

		for key in ['neighbor-changes',]:
			self.processes[name][key] = scope[-1].pop(key,False)

		for direction in ['send','receive']:
			for action in ['packets','parsed','consolidate']:
				key = '%s-%s' % (direction,action)
				self.processes[name][key] = scope[-1].pop(key,False)

			for message in Message.CODE.MESSAGES:
				key = '%s-%d' % (direction,message)
				self.processes[name][key] = scope[-1].pop(key,False)

		run = scope[-1].pop('process-run','')
		if run:
			if len(tokens) != 1:
				return self.error.set(self._str_process_error)

			self.processes[name]['encoder'] = scope[-1].get('encoder','') or self.api_encoder
			self.processes[name]['run'] = run
			return True
		elif len(tokens):
			return self.error.set(self._str_process_error)

	# Limit the AFI/SAFI pair announced to peers

	def _multi_family (self, scope, tokens):
		# we know all the families we should use
		scope[-1]['families'] = []
		while True:
			r = self._dispatch(
				scope,'family',
				[],
				self._dispatch_family.keys()
			)
			if r is False:
				return False
			if r is None:
				break
		self.family.clear()
		return True

	# capacity

	def _multi_capability (self, scope, tokens):
		# we know all the families we should use
		while True:
			r = self._dispatch(
				scope,'capability',
				[],
				self._dispatch_capability.keys()
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	# route grouping with watchdog

	# Group Neighbor

	def _multi_group (self, scope, address):
		scope.append({})
		while True:
			r = self._dispatch(
				scope,'group',
				[
					'static','flow','l2vpn',
					'neighbor','process','family',
					'capability','operational'
				],
				self._dispatch_neighbor.keys()
			)
			if r is False:
				return False
			if r is None:
				scope.pop(-1)
				return True

	def _make_neighbor (self, scope):
		# we have local_scope[-2] as the group template and local_scope[-1] as the peer specific
		if len(scope) > 1:
			for key,content in scope[-2].iteritems():
				if key not in scope[-1]:
					scope[-1][key] = deepcopy(content)
				elif key == 'announce':
					scope[-1][key].extend(scope[-2][key])

		neighbor = Neighbor()
		for local_scope in scope:
			value = local_scope.get('router-id','')
			if value:
				neighbor.router_id = value
			value = local_scope.get('peer-address','')
			if value:
				neighbor.peer_address = value
			value = local_scope.get('local-address','')
			if value:
				neighbor.local_address = value
			value = local_scope.get('local-as','')
			if value:
				neighbor.local_as = value
			value = local_scope.get('peer-as','')
			if value:
				neighbor.peer_as = value
			value = local_scope.get('passive',False)
			if value:
				neighbor.passive = value
			value = local_scope.get('listen',0)
			if value:
				neighbor.listen = value
			value = local_scope.get('hold-time','')
			if value:
				neighbor.hold_time = value

			neighbor.host_name = local_scope.get('host-name',hostname())
			neighbor.domain_name = local_scope.get('domain-name',domainname())

			neighbor.changes = local_scope.get('announce',[])
			messages = local_scope.get('operational-message',[])

		# we want to have a socket for the cli
		if self.fifo:
			_cli_name = 'CLI'
			self.processes[_cli_name] = {
				'neighbor': '*',
				'encoder': 'json',
				'run': [sys.executable, sys.argv[0]],

				'neighbor-changes': False,

				'receive-consolidate': False,
				'receive-packets': False,
				'receive-parsed': False,

				'send-consolidate': False,
				'send-packets': False,
				'send-parsed': False,
			}

			for direction in ['send','receive']:
				for message in [
					Message.CODE.NOTIFICATION,
					Message.CODE.OPEN,
					Message.CODE.KEEPALIVE,
					Message.CODE.UPDATE,
					Message.CODE.ROUTE_REFRESH,
					Message.CODE.OPERATIONAL
				]:
					self.processes[_cli_name]['%s-%d' % (direction,message)] = False

		for name in self.processes.keys():
			process = self.processes[name]

			neighbor.api.set('neighbor-changes',process.get('neighbor-changes',False))

			for direction in ['send','receive']:
				for option in ['packets','consolidate','parsed']:
					neighbor.api.set_value(direction,option,process.get('%s-%s' % (direction,option),False))

				for message in [
					Message.CODE.NOTIFICATION,
					Message.CODE.OPEN,
					Message.CODE.KEEPALIVE,
					Message.CODE.UPDATE,
					Message.CODE.ROUTE_REFRESH,
					Message.CODE.OPERATIONAL
				]:
					neighbor.api.set_message(direction,message,process.get('%s-%d' % (direction,message),False))

		if not neighbor.router_id:
			neighbor.router_id = neighbor.local_address

		local_scope = scope[-1]
		neighbor.description = local_scope.get('description','')

		neighbor.md5 = local_scope.get('md5',None)
		neighbor.ttl = local_scope.get('ttl-security',None)
		neighbor.group_updates = local_scope.get('group-updates',None)

		neighbor.route_refresh = local_scope.get('route-refresh',0)
		neighbor.graceful_restart = local_scope.get('graceful-restart',0)
		if neighbor.graceful_restart is None:
			# README: Should it be a subclass of int ?
			neighbor.graceful_restart = int(neighbor.hold_time)
		neighbor.multisession = local_scope.get('multi-session',False)
		neighbor.operational = local_scope.get('capa-operational',False)
		neighbor.add_path = local_scope.get('add-path',0)
		neighbor.flush = local_scope.get('auto-flush',True)
		neighbor.adjribout = local_scope.get('adj-rib-out',True)
		neighbor.asn4 = local_scope.get('asn4',True)
		neighbor.aigp = local_scope.get('aigp',None)

		if neighbor.route_refresh and not neighbor.adjribout:
			return self.error.set('incomplete option route-refresh and no adj-rib-out')

		# XXX: check that if we have any message, we have parsed/packets
		# XXX: and vice-versa

		missing = neighbor.missing()
		if missing:
			return self.error.set('incomplete neighbor, missing %s' % missing)

		if neighbor.local_address.afi != neighbor.peer_address.afi:
			return self.error.set('local-address and peer-address must be of the same family')

		if neighbor.peer_address.ip in self._neighbors:
			return self.error.set('duplicate peer definition %s' % neighbor.peer_address.ip)

		openfamilies = local_scope.get('families','everything')
		# announce every family we know
		if neighbor.multisession and openfamilies == 'everything':
			# announce what is needed, and no more, no need to have lots of TCP sessions doing nothing
			_families = set()
			for change in neighbor.changes:
				_families.add((change.nlri.afi,change.nlri.safi))
			families = list(_families)
		elif openfamilies in ('all','everything'):
			families = NLRI.known_families()
		# only announce what you have as routes
		elif openfamilies == 'minimal':
			_families = set()
			for change in neighbor.changes:
				_families.add((change.nlri.afi,change.nlri.safi))
			families = list(_families)
		else:
			families = openfamilies

		# check we are not trying to announce routes without the right MP announcement
		for family in neighbor.families():
			if family not in families:
				afi,safi = family
				return self.error.set('Trying to announce a route of type %s,%s when we are not announcing the family to our peer' % (afi,safi))

		# add the families to the list of families known
		initial_families = list(neighbor.families())
		for family in families:
			if family not in initial_families:
				# we are modifying the data used by .families() here
				neighbor.add_family(family)

		if neighbor.group_updates is None:
			neighbor.group_updates = True

		def _init_neighbor (neighbor):
			families = neighbor.families()
			for change in neighbor.changes:
				if change.nlri.family() in families:
					# This adds the family to neighbor.families()
					neighbor.rib.outgoing.insert_announced_watchdog(change)
			for message in messages:
				if message.family() in families:
					if message.name == 'ASM':
						neighbor.asm[message.family()] = message
					else:
						neighbor.messages.append(message)
			self._neighbors[neighbor.name()] = neighbor

		# create one neighbor object per family for multisession
		if neighbor.multisession and len(neighbor.families()) > 1:
			for family in neighbor.families():
				# XXX: FIXME: Ok, it works but it takes LOTS of memory ..
				m_neighbor = deepcopy(neighbor)
				m_neighbor.make_rib()
				m_neighbor.rib.outgoing.families = [family]
				_init_neighbor(m_neighbor)
		else:
			neighbor.make_rib()
			_init_neighbor(neighbor)

		# display configuration
		for line in str(neighbor).split('\n'):
			self.logger.configuration(line)
		self.logger.configuration("\n")

		# ...
		scope.pop(-1)
		return True

	def _multi_neighbor (self, scope, tokens):
		if len(tokens) != 1:
			return self.error.set('syntax: neighbor <ip> { <options> }')

		address = tokens[0]
		scope.append({})
		try:
			scope[-1]['peer-address'] = IP.create(address)
		except (IndexError,ValueError,socket.error):
			return self.error.set('"%s" is not a valid IP address' % address)

		while True:
			r = self._dispatch(
				scope,'neighbor',
				[
					'static','flow','l2vpn',
					'process','family','capability','operational'
				],
				[
					'description','router-id','local-address','local-as','peer-as',
					'host-name','domain-name',
					'passive','listen','hold-time','add-path','graceful-restart','md5',
					'ttl-security','multi-session','group-updates','asn4','aigp',
					'auto-flush','adj-rib-out'
				]
			)
			# XXX: THIS SHOULD ALLOW CAPABILITY AND NOT THE INDIVIDUAL SUB KEYS
			if r is False:
				return False
			if r is None:
				return True

	#  Group Static ................

	def _multi_static (self, scope, tokens):
		if len(tokens) != 0:
			return self.error.set('syntax: static { route; route; ... }')

		while True:
			r = self._dispatch(
				scope,'static',
				['route',],
				['route',]
			)
			if r is False:
				return False
			if r is None:
				return True

	# Group Route  ........

	def _split_last_route (self, scope):
		# if the route does not need to be broken in smaller routes, return
		change = scope[-1]['announce'][-1]
		if Attribute.CODE.INTERNAL_SPLIT not in change.attributes:
			return True

		# ignore if the request is for an aggregate, or the same size
		mask = change.nlri.mask
		split = change.attributes[Attribute.CODE.INTERNAL_SPLIT]
		if mask >= split:
			return True

		# get a local copy of the route
		change = scope[-1]['announce'].pop(-1)

		# calculate the number of IPs in each new /<split> route
		increment = pow(2,(len(change.nlri.packed)*8) - split)
		# how many new routes are we going to create from the initial one
		number = pow(2,split - change.nlri.mask)
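		# worked example (not in the original source): splitting 10.0.0.0/22 with
		# 'split /24' on IPv4 gives increment = 2**(32-24) = 256 addresses per route
		# and number = 2**(24-22) = 4 routes: 10.0.0.0/24 up to 10.0.3.0/24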

		# convert the IP into an integer/long
		ip = 0
		for c in change.nlri.packed:
			ip <<= 8
			ip += ord(c)

		afi = change.nlri.afi
		safi = change.nlri.safi

		# Really ugly
		klass = change.nlri.__class__
		if klass is INET:
			path_info = change.nlri.path_info
		elif klass is MPLS:
			path_info = None
			labels = change.nlri.labels
			rd = change.nlri.rd
		# using .packed rather than .pack(); it does not matter here as this is an IP, not a NextHop
		nexthop = change.nlri.nexthop.packed

		change.nlri.mask = split
		change.nlri = None
		# generate the new routes
		for _ in range(number):
			# update ip to the next route; this recalculates the "ip" field of the Inet class
			nlri = klass(afi,safi,pack_int(afi,ip,split),split,nexthop,OUT.ANNOUNCE,path_info)
			if klass is MPLS:
				nlri.labels = labels
				nlri.rd = rd
			# next ip
			ip += increment
			# save route
			scope[-1]['announce'].append(Change(nlri,change.attributes))

		return True

	def _multi_static_route (self, scope, tokens):
		if len(tokens) != 1:
			return self.error.set(self.route.syntax)

		if not self.route.insert_static_route(scope,tokens):
			return False

		while True:
			r = self._dispatch(
				scope,'route',
				[],
				[
					'next-hop','origin','as-path','as-sequence','med','aigp',
					'local-preference','atomic-aggregate','aggregator',
					'path-information','community','originator-id','cluster-list',
					'extended-community','split','label','rd','route-distinguisher',
					'watchdog','withdraw','attribute'
				]
			)
			if r is False:
				return False
			if r is None:
				return self._split_last_route(scope)

	def _single_static_route (self, scope, tokens):
		if len(tokens) < 3:
			return False

		if not self.route.insert_static_route(scope,tokens):
			return False

		while len(tokens):
			command = tokens.pop(0)

			if command in ('withdraw','withdrawn'):
				if self.route.withdraw(scope,tokens):
					continue
				return False

			if len(tokens) < 1:
				return False

			if command in self._dispatch_route:
				if command in ('rd','route-distinguisher'):
					if self._dispatch_route[command](scope,tokens,SAFI.nlri_mpls):
						continue
				else:
					if self._dispatch_route[command](scope,tokens):
						continue
			else:
				return False
			return False

		if not self.route.check_static_route(scope):
			return False

		return self._split_last_route(scope)

	def _single_l2vpn_vpls (self, scope, tokens):
		# TODO: actual length? (like rd+lb+bo+ve+bs+rd; 14 or so)
		if len(tokens) < 10:
			return False

		if not self._insert_l2vpn_vpls(scope,tokens):
			return False

		while len(tokens):
			command = tokens.pop(0)
			if len(tokens) < 1:
				return False
			if command in self._dispatch_vpls:
				if command in ('rd','route-distinguisher'):
					if self._dispatch_vpls[command](scope,tokens,SAFI.vpls):
						continue
				else:
					if self._dispatch_vpls[command](scope,tokens):
						continue
			else:
				return False
			return False

		if not self._check_l2vpn_vpls(scope):
			return False
		return True

	# VPLS

	def _multi_l2vpn (self, scope, tokens):
		if len(tokens) != 0:
			return self.error.set(self.l2vpn.syntax)

		while True:
			r = self._dispatch(
				scope,'l2vpn',
				['vpls',],
				['vpls',]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	def _insert_l2vpn_vpls (self, scope, tokens=None):
		try:
			attributes = Attributes()
			change = Change(VPLS(None,None,None,None,None),attributes)
		except ValueError:
			return self.error.set(self.l2vpn.syntax)

		if 'announce' not in scope[-1]:
			scope[-1]['announce'] = []

		scope[-1]['announce'].append(change)
		return True

	def _multi_l2vpn_vpls (self, scope, tokens):
		if len(tokens) > 1:
			return self.error.set(self.l2vpn.syntax)

		if not self._insert_l2vpn_vpls(scope):
			return False

		while True:
			r = self._dispatch(
				scope,'l2vpn',
				[],
				[
					'next-hop','origin','as-path','med','local-preference',
					'community','originator-id','cluster-list','extended-community',
					'rd','route-distinguisher','withdraw',
					'endpoint','offset',
					'size','base'
				]
			)
			if r is False:
				return False
			if r is None:
				break

		return True

	# Group Flow  ........

	def _multi_flow (self, scope, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		while True:
			r = self._dispatch(
				scope,'flow',
				['route',],
				[]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	def _insert_flow_route (self, scope, tokens=None):
		if self.flow.state != 'out':
			return self.error.set(self.flow.syntax)

		self.flow.state = 'match'

		try:
			attributes = Attributes()
			attributes[Attribute.CODE.EXTENDED_COMMUNITY] = ExtendedCommunities()
			flow = Change(Flow(),attributes)
		except ValueError:
			return self.error.set(self.flow.syntax)

		if 'announce' not in scope[-1]:
			scope[-1]['announce'] = []

		scope[-1]['announce'].append(flow)
		return True

	def _check_flow_route (self, scope):
		self.logger.configuration('warning: no checks on flows are implemented')
		return True

	def _check_l2vpn_vpls (self, scope):
		nlri = scope[-1]['announce'][-1].nlri

		if nlri.ve is None:
			raise ValueError(self._str_vpls_bad_enpoint)

		if nlri.base is None:
			raise ValueError(self._str_vpls_bad_label)

		if nlri.offset is None:
			raise ValueError(self._str_vpls_bad_offset)

		if nlri.size is None:
			raise ValueError(self._str_vpls_bad_size)

		if nlri.base > (0xFFFFF - nlri.size):  # 20 bits, 3 bytes
			raise ValueError(self._str_vpls_bad_label)

		return True

	def _multi_flow_route (self, scope, tokens):
		if len(tokens) > 1:
			return self.error.set(self.flow.syntax)

		if not self._insert_flow_route(scope):
			return False

		while True:
			r = self._dispatch(
				scope,'flow-route',
				['match','then'],
				['rd','route-distinguisher','next-hop']
			)
			if r is False:
				return False
			if r is None:
				break

		if self.flow.state != 'out':
			return self.error.set(self.flow.syntax)

		return True

	# ..........................................

	def _multi_match (self, scope, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		if self.flow.state != 'match':
			return self.error.set(self.flow.syntax)

		self.flow.state = 'then'

		while True:
			r = self._dispatch(
				scope,'flow-match',
				[],
				[
					'source','destination',
					'source-ipv4','destination-ipv4',
					'port','source-port','destination-port',
					'protocol','next-header','tcp-flags','icmp-type','icmp-code',
					'fragment','dscp','traffic-class','packet-length','flow-label'
				]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	def _multi_then (self, scope, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		if self.flow.state != 'then':
			return self.error.set(self.flow.syntax)

		self.flow.state = 'out'

		while True:
			r = self._dispatch(
				scope,'flow-then',
				[],
				[
					'accept','discard','rate-limit',
					'redirect','copy','redirect-to-nexthop',
					'mark','action',
					'community','extended-community'
				]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	# ..........................................

	def _multi_api (self, scope, direction, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		while True:
			r = self._dispatch(
				scope,direction,
				[],
				[
					'packets','parsed','consolidate',
					'notification','open','keepalive',
					'update','refresh','operational'
				]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	#  Group Operational ................

	def _multi_operational (self, scope, tokens):
		if len(tokens) != 0:
			return self.error.set('syntax: operational { command; command; ... }')

		while True:
			r = self._dispatch(
				scope,'operational',
				[],
				['asm',]
			)
			if r is False:
				return False
			if r is None:
				return True

	def _single_operational_asm (self, scope, value):
		return self._single_operational(Advisory.ASM,scope,['afi','safi','advisory'],value)

	def _single_operational (self, klass, scope, parameters, value):
		def utf8 (string): return string.encode('utf-8')[1:-1]

		convert = {
			'afi': AFI.value,
			'safi': SAFI.value,
			'sequence': int,
			'counter': long,
			'advisory': utf8
		}

		def valid (_):
			return True

		def u32 (_):
			return int(_) <= 0xFFFFFFFF

		def u64 (_):
			return long(_) <= 0xFFFFFFFFFFFFFFFF

		def advisory (_):
			return len(_.encode('utf-8')) <= MAX_ADVISORY + 2  # the two quotes

		validate = {
			'afi': AFI.value,
			'safi': SAFI.value,
			'sequence': u32,
			'counter': u64,
		}

		number = len(parameters)*2
		tokens = formated(value).split(' ',number-1)
		if len(tokens) != number:
			return self.error.set('invalid operational syntax, wrong number of arguments')

		data = {}

		while tokens and parameters:
			command = tokens.pop(0).lower()
			value = tokens.pop(0)

			if command == 'router-id':
				if isipv4(value):
					data['routerid'] = RouterID(value)
				else:
					return self.error.set('invalid operational value for %s' % command)
				continue

			expected = parameters.pop(0)

			if command != expected:
				return self.error.set('invalid operational syntax, unknown argument %s' % command)
			if not validate.get(command,valid)(value):
				return self.error.set('invalid operational value for %s' % command)

			data[command] = convert[command](value)

		if tokens or parameters:
			return self.error.set('invalid advisory syntax, missing argument(s) %s' % ', '.join(parameters))

		if 'routerid' not in data:
			data['routerid'] = None

		if 'operational-message' not in scope[-1]:
			scope[-1]['operational-message'] = []

		# iterate on each family for the peer if multiprotocol is set.
		scope[-1]['operational-message'].append(klass(**data))
		return True
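The _single_operational parser above walks a fixed list of expected parameter names, consuming token pairs and pushing each value through per-field validators and converters. A minimal, self-contained sketch of that pattern; the names parse_pairs, CONVERT and VALIDATE are illustrative and not part of ExaBGP:

CONVERT = {
    'afi': str,
    'safi': str,
    'sequence': int,
}

VALIDATE = {
    'sequence': lambda value: int(value) <= 0xFFFFFFFF,
}


def parse_pairs(line, parameters):
    # split into exactly one "<name> <value>" pair per expected parameter
    tokens = line.split(' ', len(parameters) * 2 - 1)
    if len(tokens) != len(parameters) * 2:
        raise ValueError('wrong number of arguments')
    data = {}
    for expected in parameters:
        command, value = tokens.pop(0).lower(), tokens.pop(0)
        if command != expected:
            raise ValueError('unknown argument %s' % command)
        if not VALIDATE.get(command, lambda _: True)(value):
            raise ValueError('invalid value for %s' % command)
        data[command] = CONVERT[command](value)
    return data


# parse_pairs('afi ipv4 safi unicast sequence 7', ['afi', 'safi', 'sequence'])
# -> {'afi': 'ipv4', 'safi': 'unicast', 'sequence': 7}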
コード例 #10
0
ファイル: bgp.py プロジェクト: ajufrancis/exabgp
		env.debug.selfcheck = True
		env.log.parser = True

	if options.memory:
		env.debug.memory = True


	configurations = []
	# check the files only once we have parsed all the command line options and allowed them to run
	if options.configuration:
		for f in options.configuration:
			configurations.append(os.path.realpath(os.path.normpath(f)))
	else:
		from exabgp.logger import Logger
		logger = Logger()
		logger.configuration('no configuration file provided','error')
		sys.exit(1)

	for configuration in configurations:
		if not os.path.isfile(configuration):
			from exabgp.logger import Logger
			logger = Logger()
			logger.configuration('the argument passed as configuration is not a file','error')
			sys.exit(1)

	from exabgp.bgp.message.update.attribute.nexthop import NextHop
	NextHop.caching = env.cache.nexthops

	from exabgp.bgp.message.update.attribute.communities import Community
	Community.caching = env.cache.attributes
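The snippet above resolves every configuration path with os.path.realpath/os.path.normpath and aborts if any of them is not a regular file. A standalone sketch of that normalise-then-check step; resolve_configurations is an illustrative name, not an ExaBGP helper:

import os
import sys


def resolve_configurations(paths):
    resolved = []
    for path in paths:
        normalised = os.path.realpath(os.path.normpath(path))
        if not os.path.isfile(normalised):
            sys.exit('the argument passed as configuration is not a file (%s)' % path)
        resolved.append(normalised)
    return resolved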
コード例 #11
0
ファイル: loop.py プロジェクト: javacruft/exabgp
class Reactor(object):
    # [hex(ord(c)) for c in os.popen('clear').read()]
    clear = concat_bytes_i(
        character(int(c, 16))
        for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a'])

    def __init__(self, configurations):
        self.ips = environment.settings().tcp.bind
        self.port = environment.settings().tcp.port
        self.ack = environment.settings().api.ack

        self.max_loop_time = environment.settings().reactor.speed
        self.early_drop = environment.settings().daemon.drop

        self.logger = Logger()
        self.daemon = Daemon(self)
        self.processes = None
        self.listener = None
        self.configuration = Configuration(configurations)
        self.api = API(self)

        self.peers = {}
        self.route_update = False

        self._stopping = environment.settings().tcp.once
        self._shutdown = False
        self._reload = False
        self._reload_processes = False
        self._restart = False
        self._saved_pid = False
        self._running = None
        self._pending = deque()
        self._async = deque()

        self._signal = {}

        signal.signal(signal.SIGTERM, self.sigterm)
        signal.signal(signal.SIGHUP, self.sighup)
        signal.signal(signal.SIGALRM, self.sigalrm)
        signal.signal(signal.SIGUSR1, self.sigusr1)
        signal.signal(signal.SIGUSR2, self.sigusr2)

    def _termination(self, reason):
        while True:
            try:
                self._shutdown = True
                self.logger.reactor(reason, 'warning')
                break
            except KeyboardInterrupt:
                pass

    def sigterm(self, signum, frame):
        self.logger.reactor('SIG TERM received - shutdown')
        self._shutdown = True
        for key in self.peers:
            if self.peers[key].neighbor.api['signal']:
                self._signal[key] = signum

    def sighup(self, signum, frame):
        self.logger.reactor('SIG HUP received - shutdown')
        self._shutdown = True
        for key in self.peers:
            if self.peers[key].neighbor.api['signal']:
                self._signal[key] = signum

    def sigalrm(self, signum, frame):
        self.logger.reactor('SIG ALRM received - restart')
        self._restart = True
        for key in self.peers:
            if self.peers[key].neighbor.api['signal']:
                self._signal[key] = signum

    def sigusr1(self, signum, frame):
        self.logger.reactor('SIG USR1 received - reload configuration')
        self._reload = True
        for key in self.peers:
            if self.peers[key].neighbor.api['signal']:
                self._signal[key] = signum

    def sigusr2(self, signum, frame):
        self.logger.reactor(
            'SIG USR2 received - reload configuration and processes')
        self._reload = True
        self._reload_processes = True
        for key in self.peers:
            if self.peers[key].neighbor.api['signal']:
                self._signal[key] = signum

    def _api_ready(self, sockets):
        sleeptime = self.max_loop_time / 20
        fds = self.processes.fds()
        ios = fds + sockets
        try:
            read, _, _ = select.select(ios, [], [], sleeptime)
            for fd in fds:
                if fd in read:
                    read.remove(fd)
            return read
        except select.error as exc:
            errno, message = exc.args  # pylint: disable=W0633
            if errno not in error.block:
                raise exc
            return []
        except socket.error as exc:
            if exc.errno in error.fatal:
                raise exc
            return []
        except KeyboardInterrupt:
            self._termination('^C received')
            return []

    def _setup_listener(self, local_addr, remote_addr, port, md5_password,
                        md5_base64, ttl_in):
        try:
            if not self.listener:
                self.listener = Listener()
            if not remote_addr:
                remote_addr = IP.create(
                    '0.0.0.0') if local_addr.ipv4() else IP.create('::')
            self.listener.listen(local_addr, remote_addr, port, md5_password,
                                 md5_base64, ttl_in)
            self.logger.reactor(
                'Listening for BGP session(s) on %s:%d%s' %
                (local_addr, port, ' with MD5' if md5_password else ''))
            return True
        except NetworkError as exc:
            if os.geteuid() != 0 and port <= 1024:
                self.logger.reactor(
                    'Can not bind to %s:%d, you may need to run ExaBGP as root'
                    % (local_addr, port), 'critical')
            else:
                self.logger.reactor(
                    'Can not bind to %s:%d (%s)' %
                    (local_addr, port, str(exc)), 'critical')
            self.logger.reactor(
                'unset exabgp.tcp.bind if you do not want to listen for incoming connections',
                'critical')
            self.logger.reactor(
                'and check that no other daemon is already binding to port %d'
                % port, 'critical')
            return False

    def _handle_listener(self):
        if not self.listener:
            return

        ranged_neighbor = []

        for connection in self.listener.connected():
            for key in self.peers:
                peer = self.peers[key]
                neighbor = peer.neighbor

                connection_local = IP.create(connection.local).address()
                neighbor_peer_start = neighbor.peer_address.address()
                neighbor_peer_next = neighbor_peer_start + neighbor.range_size

                if not neighbor_peer_start <= connection_local < neighbor_peer_next:
                    continue

                connection_peer = IP.create(connection.peer).address()
                neighbor_local = neighbor.local_address.address()

                if connection_peer != neighbor_local:
                    if not neighbor.auto_discovery:
                        continue

                # we found a range matching for this connection
                # but the peer may already have connected, so
                # we need to iterate all individual peers before
                # handling "range" peers
                if neighbor.range_size > 1:
                    ranged_neighbor.append(peer.neighbor)
                    continue

                denied = peer.handle_connection(connection)
                if denied:
                    self.logger.reactor(
                        'refused connection from %s due to the state machine' %
                        connection.name())
                    self._async.append(denied)
                    break
                self.logger.reactor('accepted connection from %s' %
                                    connection.name())
                break
            else:
                # we did not break (and nothing was found/done, or we have a group match)
                matched = len(ranged_neighbor)
                if matched > 1:
                    self.logger.reactor(
                        'could not accept connection from %s (more than one neighbor match)'
                        % connection.name())
                    self._async.append(
                        connection.notification(
                            6, 5,
                            b'could not accept the connection (more than one neighbor match)'
                        ))
                    return
                if not matched:
                    self.logger.reactor('no session configured for %s' %
                                        connection.name())
                    self._async.append(
                        connection.notification(
                            6, 3, b'no session configured for the peer'))
                    return

                new_neighbor = copy.copy(ranged_neighbor[0])
                new_neighbor.range_size = 1
                new_neighbor.generated = True
                new_neighbor.local_address = IP.create(connection.peer)
                new_neighbor.peer_address = IP.create(connection.local)

                new_peer = Peer(new_neighbor, self)
                denied = new_peer.handle_connection(connection)
                if denied:
                    self.logger.reactor(
                        'refused connection from %s due to the state machine' %
                        connection.name())
                    self._async.append(denied)
                    return

                self.peers[new_neighbor.name()] = new_peer
                return

    def run(self, validate):
        self.daemon.daemonise()

        # Make sure we create processes once we have closed the file descriptors
        # unfortunately, this must be done before reading the configuration file
        # so we can not do it with dropped privileges
        self.processes = Processes(self)

        # we have to read the configuration possibly with root privileges
        # as we need the MD5 information when we bind, and root is needed
        # to bind to a port < 1024

        # this is undesirable as :
        # - handling user generated data as root should be avoided
        # - we may not be able to reload the configuration once the privileges are dropped

        # but I can not see any way to avoid it
        for ip in self.ips:
            if not self._setup_listener(ip, None, self.port, None, False,
                                        None):
                return False

        if not self.load():
            return False

        if validate:  # only validate configuration
            self.logger.configuration('')
            self.logger.configuration('Parsed Neighbors, un-templated')
            self.logger.configuration('------------------------------')
            self.logger.configuration('')
            for key in self.peers:
                self.logger.configuration(str(self.peers[key].neighbor))
                self.logger.configuration('')
            return True

        for neighbor in self.configuration.neighbors.values():
            if neighbor.listen:
                if not self._setup_listener(
                        neighbor.md5_ip, neighbor.peer_address,
                        neighbor.listen, neighbor.md5_password,
                        neighbor.md5_base64, neighbor.ttl_in):
                    return False

        if not self.early_drop:
            self.processes.start()

        if not self.daemon.drop_privileges():
            self.logger.reactor(
                'Could not drop privileges to \'%s\' refusing to run as root' %
                self.daemon.user, 'critical')
            self.logger.reactor(
                'Set the environment value exabgp.daemon.user to change the unprivileged user',
                'critical')
            return

        if self.early_drop:
            self.processes.start()

        # This is required to make sure we can write in the log location as we now have dropped root privileges
        if not self.logger.restart():
            self.logger.reactor('Could not setup the logger, aborting',
                                'critical')
            return

        if not self.daemon.savepid():
            return

        # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
        reload_completed = True

        wait = environment.settings().tcp.delay
        if wait:
            sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
            self.logger.reactor('waiting for %d seconds before connecting' %
                                sleeptime)
            time.sleep(float(sleeptime))
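        # illustrative note (not in the original source): with exabgp.tcp.delay = 5 the
        # formula above sleeps until the next 5-minute boundary of the epoch clock,
        # e.g. at 12:03:20 UTC it sleeps (5*60) - (epoch % 300) = 100 seconds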

        workers = {}
        peers = set()
        busy = False

        while True:
            try:
                start = time.time()
                end = start + self.max_loop_time

                if self._shutdown:
                    self._shutdown = False
                    self.shutdown()
                    break

                if self._reload and reload_completed:
                    self._reload = False
                    self.load()
                    self.processes.start(self._reload_processes)
                    self._reload_processes = False
                elif self._restart:
                    self._restart = False
                    self.restart()

                # We got some API routes to announce
                if self.route_update:
                    self.route_update = False
                    self.route_send()

                for key, peer in self.peers.items():
                    if not peer.neighbor.passive or peer.proto:
                        peers.add(key)
                    if key in self._signal:
                        self.peers[key].reactor.processes.signal(
                            self.peers[key].neighbor, self._signal[key])
                self._signal = {}

                # check all incoming connections
                self._handle_listener()

                # give a turn to all the peers
                while start < time.time() < end:
                    for key in list(peers):
                        peer = self.peers[key]
                        action = peer.run()

                        # .run() returns an ACTION enum:
                        # * immediate if it wants to be called again
                        # * later if it should be called again but has no work atm
                        # * close if it is finished and is closing down, or restarting
                        if action == ACTION.CLOSE:
                            self._unschedule(key)
                            peers.discard(key)
                        # we are losing this peer, no point in scheduling more process work
                        elif action == ACTION.LATER:
                            for io in peer.sockets():
                                workers[io] = key
                            # no need to come back to it before a full cycle
                            peers.discard(key)

                    # handle API calls
                    busy = self._scheduled_api()
                    # handle new connections
                    busy |= self._scheduled_listener()

                    if not peers and not busy:
                        break

                if not peers:
                    reload_completed = True

                for io in self._api_ready(list(workers)):
                    peers.add(workers[io])
                    del workers[io]

                if self._stopping and not self.peers.keys():
                    break

            except KeyboardInterrupt:
                self._termination('^C received')
            # socket.error is a subclass of IOError (so catch it first)
            except socket.error:
                self._termination('socket error received')
            except IOError:
                self._termination(
                    'I/O Error received, most likely ^C during IO')
            except SystemExit:
                self._termination('exiting')
            except ProcessError:
                self._termination(
                    'Problem when sending message(s) to helper program, stopping'
                )
            except select.error:
                self._termination('problem using select, stopping')

    def shutdown(self):
        """terminate all the current BGP connections"""
        self.logger.reactor('performing shutdown')
        if self.listener:
            self.listener.stop()
            self.listener = None
        for key in self.peers.keys():
            self.peers[key].stop()
        self.processes.terminate()
        self.daemon.removepid()
        self._stopping = True

    def load(self):
        """reload the configuration and send to the peer the route which changed"""
        self.logger.reactor('performing reload of exabgp %s' % version)

        reloaded = self.configuration.reload()

        if not reloaded:
            #
            # Careful: the string below is used by the QA code to check for success or failure
            self.logger.configuration(
                'problem with the configuration file, no change done', 'error')
            # Careful: the string above is used by the QA code to check for success or failure
            #
            self.logger.configuration(str(self.configuration.error), 'error')
            return False

        for key, peer in self.peers.items():
            if key not in self.configuration.neighbors:
                self.logger.reactor('removing peer: %s' % peer.neighbor.name())
                peer.stop()

        for key, neighbor in self.configuration.neighbors.items():
            # new peer
            if key not in self.peers:
                self.logger.reactor('new peer: %s' % neighbor.name())
                peer = Peer(neighbor, self)
                self.peers[key] = peer
            # modified peer
            elif self.peers[key].neighbor != neighbor:
                self.logger.reactor(
                    'peer definition change, establishing a new connection for %s'
                    % str(key))
                self.peers[key].reestablish(neighbor)
            # same peer but perhaps not the routes
            else:
                # finding what route changed and sending the delta is not obvious
                self.logger.reactor(
                    'peer definition identical, updating peer routes if required for %s'
                    % str(key))
                self.peers[key].reconfigure(neighbor)
            for ip in self.ips:
                if ip.afi == neighbor.peer_address.afi:
                    self._setup_listener(ip, neighbor.peer_address, self.port,
                                         neighbor.md5_password,
                                         neighbor.md5_base64, None)
        self.logger.configuration('loaded new configuration successfully',
                                  'info')

        return True

    def _scheduled_listener(self, flipflop=[]):
        try:
            for generator in self._async:
                try:
                    six.next(generator)
                    six.next(generator)
                    flipflop.append(generator)
                except StopIteration:
                    pass
            self._async, flipflop = flipflop, self._async
            return len(self._async) > 0
        except KeyboardInterrupt:
            self._termination('^C received')
            return False

    def _scheduled_api(self):
        try:
            # read at least one message per process if there is one, and parse it
            for service, command in self.processes.received():
                self.api.text(self, service, command)

            # if we have nothing to do, return or save the work
            if not self._running:
                if not self._pending:
                    return False
                self._running, name = self._pending.popleft()
                self.logger.reactor('callback | installing %s' % name)

            if self._running:
                # run it
                try:
                    self.logger.reactor('callback | running')
                    six.next(self._running)  # run
                    # should raise StopIteration in most cases
                    # and prevent us from having to run twice to run one command
                    six.next(self._running)  # run
                except StopIteration:
                    self._running = None
                    self.logger.reactor('callback | removing')
                return True
            return False

        except KeyboardInterrupt:
            self._termination('^C received')
            return False

    def route_send(self):
        """the process ran and we need to figure what routes to changes"""
        self.logger.reactor('performing dynamic route update')
        for key in self.configuration.neighbors.keys():
            self.peers[key].send_new()
        self.logger.reactor('updated peers dynamic routes successfully')

    def restart(self):
        """kill the BGP session and restart it"""
        self.logger.reactor('performing restart of exabgp %s' % version)
        self.configuration.reload()

        for key in self.peers.keys():
            if key not in self.configuration.neighbors.keys():
                # the neighbor is gone from the configuration, use the peer's copy for logging
                neighbor = self.peers[key].neighbor
                self.logger.reactor('removing Peer %s' % neighbor.name())
                self.peers[key].stop()
            else:
                self.peers[key].reestablish()
        self.processes.terminate()
        self.processes.start()

    def _unschedule(self, peer):
        if peer in self.peers:
            del self.peers[peer]

    def answer(self, service, string):
        if self.ack:
            self.always_answer(service, string)

    def always_answer(self, service, string):
        self.processes.write(service, string)
        self.logger.reactor('responding to %s : %s' %
                            (service, string.replace('\n', '\\n')))

    def api_shutdown(self):
        self._shutdown = True
        self._pending = deque()
        self._running = None

    def api_reload(self):
        self._reload = True
        self._pending = deque()
        self._running = None

    def api_restart(self):
        self._restart = True
        self._pending = deque()
        self._running = None

    @staticmethod
    def match_neighbor(description, name):
        for string in description:
            if re.search(r'(^|[\s])%s($|[\s,])' % re.escape(string),
                         name) is None:
                return False
        return True
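    # illustrative note (not in the original source): every string of the description
    # must appear as a whitespace-delimited token of the peer name, so
    # match_neighbor(['10.0.0.1'], 'neighbor 10.0.0.1 local-as 65000') is True while
    # match_neighbor(['10.0.0.10'], 'neighbor 10.0.0.1 local-as 65000') is False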

    def match_neighbors(self, descriptions):
        """return the sublist of peers matching the description passed, or None if no description is given"""
        if not descriptions:
            return self.peers.keys()

        returned = []
        for key in self.peers:
            for description in descriptions:
                if Reactor.match_neighbor(description, key):
                    if key not in returned:
                        returned.append(key)
        return returned

    def nexthops(self, peers):
        return dict(
            (peer, self.peers[peer].neighbor.local_address) for peer in peers)

    def plan(self, callback, name):
        self._pending.append((callback, name))
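_api_ready above multiplexes the helper-process file descriptors and the peer sockets through a single select call, then strips the process descriptors so only peer sockets flow back into the main loop. A simplified, standalone sketch of that readiness filter, not ExaBGP's API:

import select


def ready_peer_sockets(process_fds, peer_sockets, timeout):
    # wait on everything at once, but only report the peer sockets;
    # process I/O is handled by a different code path
    readable, _, _ = select.select(process_fds + peer_sockets, [], [], timeout)
    return [fd for fd in readable if fd not in process_fds]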
コード例 #12
0
def run(env, comment, configurations, validate, pid=0):
    logger = Logger()

    logger.error('', source='ExaBGP')
    logger.error('%s' % version, source='version')
    logger.error('%s' % sys.version.replace('\n', ' '), source='interpreter')
    logger.error('%s' % ' '.join(platform.uname()[:5]), source='os')
    logger.error('', source='ExaBGP')

    if comment:
        logger.configuration(comment)

    warning = warn()
    if warning:
        logger.configuration(warning)

    if not env.profile.enable:
        ok = Reactor(configurations).run(validate)
        __exit(env.debug.memory, 0 if ok else 1)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if not env.profile.file or env.profile.file == 'stdout':
        ok = profile.run('Reactor(configurations).run(validate)')
        __exit(env.debug.memory, 0 if ok else 1)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is not a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        logger.reactor('profiling ....')
        profiler = profile.Profile()
        profiler.enable()
        try:
            ok = Reactor(configurations).run(validate)
        except Exception:
            raise
        finally:
            profiler.disable()
            kprofile = lsprofcalltree.KCacheGrind(profiler)

            with open(profile_name, 'w+') as write:
                kprofile.output(write)

            __exit(env.debug.memory, 0 if ok else 1)
    else:
        logger.reactor("-" * len(notice))
        logger.reactor(notice)
        logger.reactor("-" * len(notice))
        Reactor(configurations).run(validate)
        __exit(env.debug.memory, 1)
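The profiling branch above wraps the reactor run in a profiler and dumps the collected data in a finally block, whatever happens during the run. The original writes KCacheGrind files through lsprofcalltree; the sketch below shows the same wrap-and-dump pattern using only the standard library (run_profiled and the output name are illustrative):

import cProfile
import pstats


def run_profiled(func, output='exabgp.pstats'):
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        return func()
    finally:
        profiler.disable()
        # dump raw stats; inspect later with pstats.Stats(output).sort_stats('cumulative').print_stats()
        pstats.Stats(profiler).dump_stats(output)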
コード例 #13
0
ファイル: loop.py プロジェクト: ravikumar727/exabgp
class Reactor(object):
    # [hex(ord(c)) for c in os.popen('clear').read()]
    clear = concat_bytes_i(
        character(int(c, 16))
        for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a'])

    def __init__(self, configurations):
        self._ips = environment.settings().tcp.bind
        self._port = environment.settings().tcp.port
        self._stopping = environment.settings().tcp.once

        self.max_loop_time = environment.settings().reactor.speed
        self.early_drop = environment.settings().daemon.drop

        self.processes = None

        self.configuration = Configuration(configurations)
        self.logger = Logger()
        self.async = ASYNC()
        self.signal = Signal()
        self.daemon = Daemon(self)
        self.listener = Listener(self)
        self.api = API(self)

        self.peers = {}

        self._reload_processes = False
        self._saved_pid = False

    def _termination(self, reason):
        self.signal.received = Signal.SHUTDOWN
        self.logger.reactor(reason, 'warning')

    def _api_ready(self, sockets):
        sleeptime = self.max_loop_time / 100
        fds = self.processes.fds()
        ios = fds + sockets
        try:
            read, _, _ = select.select(ios, [], [], sleeptime)
            for fd in fds:
                if fd in read:
                    read.remove(fd)
            return read
        except select.error as exc:
            err_no, message = exc.args  # pylint: disable=W0633
            if err_no not in error.block:
                raise exc
            return []
        except socket.error as exc:
            # python 3 does not raise on closed FD, but python2 does
            # we have lost a peer and it is causing the select
            # to complain, the code will self-heal, ignore the issue
            # (EBADF from python2 must be ignored when checking error.fatal)
            # otherwise sending a notification causes TCP to drop and causes
            # this code to kill ExaBGP
            return []
        except ValueError as exc:
            # The peer closing the TCP connection leads to a negative file descriptor
            return []
        except KeyboardInterrupt:
            self._termination('^C received')
            return []

    def schedule_rib_check(self):
        self.logger.reactor('performing dynamic route update')
        for key in self.configuration.neighbors.keys():
            self.peers[key].schedule_rib_check()

    def _active_peers(self):
        peers = set()
        for key, peer in self.peers.items():
            if not peer.neighbor.passive or peer.proto:
                peers.add(key)
        return peers

    def run(self, validate, root):
        self.daemon.daemonise()

        # Make sure we create processes once we have closed the file descriptors
        # unfortunately, this must be done before reading the configuration file
        # so we can not do it with dropped privileges
        self.processes = Processes()

        # we have to read the configuration possibly with root privileges
        # as we need the MD5 information when we bind, and root is needed
        # to bind to a port < 1024

        # this is undesirable as :
        # - handling user generated data as root should be avoided
        # - we may not be able to reload the configuration once the privileges are dropped

        # but I can not see any way to avoid it
        for ip in self._ips:
            if not self.listener.listen_on(ip, None, self._port, None, False,
                                           None):
                return False

        if not self.load():
            return False

        if validate:  # only validate configuration
            self.logger.configuration('')
            self.logger.configuration('Parsed Neighbors, un-templated')
            self.logger.configuration('------------------------------')
            self.logger.configuration('')
            for key in self.peers:
                self.logger.configuration(str(self.peers[key].neighbor))
                self.logger.configuration('')
            return True

        for neighbor in self.configuration.neighbors.values():
            if neighbor.listen:
                if not self.listener.listen_on(
                        neighbor.md5_ip, neighbor.peer_address,
                        neighbor.listen, neighbor.md5_password,
                        neighbor.md5_base64, neighbor.ttl_in):
                    return False

        if not self.early_drop:
            self.processes.start(self.configuration.processes)

        if not self.daemon.drop_privileges():
            self.logger.reactor(
                'Could not drop privileges to \'%s\' refusing to run as root' %
                self.daemon.user, 'critical')
            self.logger.reactor(
                'Set the environment value exabgp.daemon.user to change the unprivileged user',
                'critical')
            return

        if self.early_drop:
            self.processes.start(self.configuration.processes)

        # This is required to make sure we can write in the log location as we now have dropped root privileges
        if not self.logger.restart():
            self.logger.reactor('Could not setup the logger, aborting',
                                'critical')
            return

        if not self.daemon.savepid():
            return

        # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
        reload_completed = False

        wait = environment.settings().tcp.delay
        if wait:
            sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
            self.logger.reactor('waiting for %d seconds before connecting' %
                                sleeptime)
            time.sleep(float(sleeptime))

        workers = {}
        peers = set()

        while True:
            try:
                if self.signal.received:
                    for key in self.peers:
                        if self.peers[key].neighbor.api['signal']:
                            self.peers[key].reactor.processes.signal(
                                self.peers[key].neighbor, self.signal.number)

                    signaled = self.signal.received
                    self.signal.rearm()

                    if signaled == Signal.SHUTDOWN:
                        self.shutdown()
                        break

                    if signaled == Signal.RESTART:
                        self.restart()
                        continue

                    if not reload_completed:
                        continue

                    if signaled == Signal.RELOAD:
                        self._reload_processes = True

                    if signaled in (Signal.RELOAD, Signal.FULL_RELOAD):
                        self.load()
                        self.processes.start(self.configuration.processes,
                                             self._reload_processes)
                        self._reload_processes = False
                        continue

                if self.listener.incoming():
                    # check all incoming connections
                    self.async.schedule(str(uuid.uuid1()),
                                        'check new connection',
                                        self.listener.new_connections())

                peers = self._active_peers()
                if not peers:
                    reload_completed = True

                # give a turn to all the peers
                for key in list(peers):
                    peer = self.peers[key]
                    action = peer.run()

                    # .run() returns an ACTION enum:
                    # * immediate if it wants to be called again
                    # * later if it should be called again but has no work atm
                    # * close if it is finished and is closing down, or restarting
                    if action == ACTION.CLOSE:
                        if key in self.peers:
                            del self.peers[key]
                        peers.discard(key)
                    # we are losing this peer, no point in scheduling more process work
                    elif action == ACTION.LATER:
                        for io in peer.sockets():
                            workers[io] = key
                        # no need to come back to it before a full cycle
                        peers.discard(key)

                    if not peers:
                        break

                # read at least one message per process if there is one, and parse it
                for service, command in self.processes.received():
                    self.api.text(self, service, command)

                self.async.run()

                for io in self._api_ready(list(workers)):
                    peers.add(workers[io])
                    del workers[io]

                if self._stopping and not self.peers.keys():
                    break

            except KeyboardInterrupt:
                self._termination('^C received')
            # socket.error is a subclass of IOError (so catch it first)
            except socket.error:
                self._termination('socket error received')
            except IOError:
                self._termination(
                    'I/O Error received, most likely ^C during IO')
            except SystemExit:
                self._termination('exiting')
            except ProcessError:
                self._termination(
                    'Problem when sending message(s) to helper program, stopping'
                )
            except select.error:
                self._termination('problem using select, stopping')

    def shutdown(self):
        """Terminate all the current BGP connections"""
        self.logger.reactor('performing shutdown')
        if self.listener:
            self.listener.stop()
            self.listener = None
        for key in self.peers.keys():
            self.peers[key].stop()
        self.async.clear()
        self.processes.terminate()
        self.daemon.removepid()
        self._stopping = True

    def load(self):
        """Reload the configuration and send to the peer the route which changed"""
        self.logger.reactor('performing reload of exabgp %s' % version)

        reloaded = self.configuration.reload()

        if not reloaded:
            #
            # Careful: the string below is used by the QA code to check for success or failure
            self.logger.configuration(
                'problem with the configuration file, no change done', 'error')
            # Careful: the string above is used by the QA code to check for success or failure
            #
            self.logger.configuration(str(self.configuration.error), 'error')
            return False

        for key, peer in self.peers.items():
            if key not in self.configuration.neighbors:
                self.logger.reactor('removing peer: %s' % peer.neighbor.name())
                peer.stop()

        for key, neighbor in self.configuration.neighbors.items():
            # new peer
            if key not in self.peers:
                self.logger.reactor('new peer: %s' % neighbor.name())
                peer = Peer(neighbor, self)
                self.peers[key] = peer
            # modified peer
            elif self.peers[key].neighbor != neighbor:
                self.logger.reactor(
                    'peer definition change, establishing a new connection for %s'
                    % str(key))
                self.peers[key].reestablish(neighbor)
            # same peer but perhaps not the routes
            else:
                # finding what route changed and sending the delta is not obvious
                self.logger.reactor(
                    'peer definition identical, updating peer routes if required for %s'
                    % str(key))
                self.peers[key].reconfigure(neighbor)
            for ip in self._ips:
                if ip.afi == neighbor.peer_address.afi:
                    self.listener.listen_on(ip, neighbor.peer_address,
                                            self._port, neighbor.md5_password,
                                            neighbor.md5_base64, None)
        self.logger.configuration('loaded new configuration successfully',
                                  'info')

        return True

    def restart(self):
        """Kill the BGP session and restart it"""
        self.logger.reactor('performing restart of exabgp %s' % version)
        self.configuration.reload()

        for key in self.peers.keys():
            if key not in self.configuration.neighbors.keys():
                # the neighbor is gone from the configuration, use the peer's copy for logging
                neighbor = self.peers[key].neighbor
                self.logger.reactor('removing Peer %s' % neighbor.name())
                self.peers[key].stop()
            else:
                self.peers[key].reestablish()
        self.processes.start(self.configuration.processes, True)
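load() above is essentially a diff between the peers currently running and the neighbors produced by re-parsing the configuration: removed neighbors are stopped, new ones get a fresh Peer, changed definitions reestablish the session and identical ones only reconfigure their routes. A simplified sketch of that reconciliation, not ExaBGP's API (make_peer stands in for Peer(neighbor, self)):

def apply_reload(peers, neighbors, make_peer):
    # stop peers whose neighbor disappeared from the configuration
    for key, peer in list(peers.items()):
        if key not in neighbors:
            peer.stop()
    # create, reestablish or reconfigure the remaining ones
    for key, neighbor in neighbors.items():
        if key not in peers:
            peers[key] = make_peer(neighbor)
        elif peers[key].neighbor != neighbor:
            peers[key].reestablish(neighbor)
        else:
            peers[key].reconfigure(neighbor)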
コード例 #14
0
ファイル: bgp.py プロジェクト: szhong-jnpr/exabgp
def main ():
	options = docopt.docopt(usage, help=False)

	major = int(sys.version[0])
	minor = int(sys.version[2])

	if major != 2 or minor < 5:
		sys.exit('This program can not work (is not tested) with your python version (< 2.5 or >= 3.0)')

	if options["--version"]:
		print('ExaBGP : %s' % version)
		print('Python : %s' % sys.version.replace('\n',' '))
		print('Uname  : %s' % ' '.join(platform.uname()[:5]))
		sys.exit(0)

	if options["--folder"]:
		folder = os.path.realpath(os.path.normpath(options["--folder"]))
	elif sys.argv[0].endswith('/bin/exabgp'):
		folder = sys.argv[0][:-len('/bin/exabgp')] + '/etc/exabgp'
	elif sys.argv[0].endswith('/sbin/exabgp'):
		folder = sys.argv[0][:-len('/sbin/exabgp')] + '/etc/exabgp'
	else:
		folder = '/etc/exabgp'

	os.environ['EXABGP_ETC'] = folder  # This is not the prettiest

	if options["--run"]:
		sys.argv = sys.argv[sys.argv.index('--run')+1:]
		if sys.argv[0] == 'healthcheck':
			from exabgp.application import run_healthcheck
			run_healthcheck()
		elif sys.argv[0] == 'cli':
			from exabgp.application import run_cli
			run_cli()
		else:
			print(usage)
			sys.exit(0)
		return

	envfile = 'exabgp.env' if not options["--env"] else options["--env"]
	if not envfile.startswith('/'):
		envfile = '%s/%s' % (folder, envfile)

	from exabgp.configuration.setup import environment

	try:
		env = environment.setup(envfile)
	except environment.Error as exc:
		print(usage)
		print('\nconfiguration issue,', str(exc))
		sys.exit(1)

	# Must be done before setting the logger as it modifies its behaviour

	if options["--debug"]:
		env.log.all = True
		env.log.level = syslog.LOG_DEBUG

	logger = Logger()

	named_pipe = os.environ.get('NAMED_PIPE','')
	if named_pipe:
		from exabgp.application.control import main as control
		control(named_pipe)
		sys.exit(0)

	if options["--decode"]:
		decode = ''.join(options["--decode"]).replace(':','').replace(' ','')
		if not is_bgp(decode):
			print(usage)
			print('Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default()))
			print("")
			print("The BGP message must be an hexadecimal string.")
			print("")
			print("All colons or spaces are ignored, for example:")
			print("")
			print("  --decode 001E0200000007900F0003000101")
			print("  --decode 001E:02:0000:0007:900F:0003:0001:01")
			print("  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101")
			print("  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01")
			print("  --decode 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101'")
			sys.exit(1)
	else:
		decode = ''

	# Make sure our child has a named pipe name
	if env.api.file:
		os.environ['NAMED_PIPE'] = env.api.file

	duration = options["--signal"]
	if duration and duration.isdigit():
		pid = os.fork()
		if pid:
			import time
			import signal
			try:
				time.sleep(int(duration))
				os.kill(pid,signal.SIGUSR1)
			except KeyboardInterrupt:
				pass
			try:
				pid,code = os.wait()
				sys.exit(code)
			except KeyboardInterrupt:
				try:
					pid,code = os.wait()
					sys.exit(code)
				except Exception:
					sys.exit(0)

	if options["--help"]:
		print(usage)
		print('Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default()))
		sys.exit(0)

	if options["--decode"]:
		env.log.parser = True
		env.debug.route = decode
		env.tcp.bind = ''

	if options["--profile"]:
		env.profile.enable = True
		if options["--profile"].lower() in ['1','true']:
			env.profile.file = True
		elif options["--profile"].lower() in ['0','false']:
			env.profile.file = False
		else:
			env.profile.file = options["--profile"]

	if envfile and not os.path.isfile(envfile):
		comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
	else:
		comment = ''

	if options["--full-ini"] or options["--fi"]:
		for line in environment.iter_ini():
			print(line)
		sys.exit(0)

	if options["--full-env"] or options["--fe"]:
		print()
		for line in environment.iter_env():
			print(line)
		sys.exit(0)

	if options["--diff-ini"] or options["--di"]:
		for line in environment.iter_ini(True):
			print(line)
		sys.exit(0)

	if options["--diff-env"] or options["--de"]:
		for line in environment.iter_env(True):
			print(line)
		sys.exit(0)

	if options["--once"]:
		env.tcp.once = True

	if options["--pdb"]:
		# The following may fail on old versions of python (but is required for debug.py)
		os.environ['PDB'] = 'true'
		env.debug.pdb = True

	if options["--test"]:
		env.debug.selfcheck = True
		env.log.parser = True

	if options["--memory"]:
		env.debug.memory = True

	configurations = []
	# check the files only once we have parsed all the command line options and allowed them to run
	if options["<configuration>"]:
		for f in options["<configuration>"]:
			normalised = os.path.realpath(os.path.normpath(f))
			if os.path.isfile(normalised):
				configurations.append(normalised)
				continue
			if f.startswith('etc/exabgp'):
				normalised = os.path.join(folder,f[11:])
				if os.path.isfile(normalised):
					configurations.append(normalised)
					continue

			logger.configuration('one of the arguments passed as configuration is not a file (%s)' % f,'error')
			sys.exit(1)

	else:
		print(usage)
		print('Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default()))
		print('\nno configuration file provided')
		sys.exit(1)

	from exabgp.bgp.message.update.attribute import Attribute
	Attribute.caching = env.cache.attributes

	if env.debug.rotate or len(configurations) == 1:
		run(env,comment,configurations)

	if not (env.log.destination in ('syslog','stdout','stderr') or env.log.destination.startswith('host:')):
		logger.configuration('can not log to files when running multiple configurations (as we fork)','error')
		sys.exit(1)

	try:
		# run each configuration in its own process
		pids = []
		for configuration in configurations:
			pid = os.fork()
			if pid == 0:
				run(env,comment,[configuration],os.getpid())
			else:
				pids.append(pid)

		# If we get a ^C / SIGTERM, ignore it and just continue waiting for our child processes
		import signal
		signal.signal(signal.SIGINT, signal.SIG_IGN)

		# wait for the forked processes
		for pid in pids:
			os.waitpid(pid,0)
	except OSError as exc:
		logger.reactor('Can not fork, errno %d : %s' % (exc.errno,exc.strerror),'critical')
		sys.exit(1)
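With several configuration files, main() above forks one worker per file, tells the parent to ignore ^C and then waits for every child. A generic sketch of that fork-per-configuration pattern; run_per_configuration and run_one are illustrative names:

import os
import signal


def run_per_configuration(configurations, run_one):
    pids = []
    for configuration in configurations:
        pid = os.fork()
        if pid == 0:
            run_one(configuration)  # child: handle this configuration only
            os._exit(0)
        pids.append(pid)

    # parent: ignore ^C and wait for all the children
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    for pid in pids:
        os.waitpid(pid, 0)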
コード例 #15
0
ファイル: __init__.py プロジェクト: Shmuma/exabgp
class Configuration (object):
	def __init__ (self, configurations, text=False):
		self.api_encoder = environment.settings().api.encoder

		self.logger = Logger()
		self._configurations = configurations

		self.error = Error()
		self.tokens = Tokeniser(self.error,self.logger)
		self.neighbor = ParseNeighbor(self.error,self.logger)
		self.family = ParseFamily(self.error)
		self.process = ParseProcess(self.error)
		self.route = ParseRoute(self.error)
		self.flow = ParseFlow(self.error,self.logger)
		self.l2vpn = ParseL2VPN(self.error)
		self.operational = ParseOperational(self.error)

		self._tree = {
			'configuration': {
				'neighbor':    (self._multi_neighbor,self.neighbor.make),
				'group':       (self._multi_group,true),
			},
			'group': {
				'neighbor':    (self._multi_neighbor,self.neighbor.make),
				'static':      (self._multi_static,true),
				'flow':        (self._multi_flow,true),
				'l2vpn':       (self._multi_l2vpn,true),
				'process':     (self._multi_process,true),
				'family':      (self._multi_family,true),
				'capability':  (self._multi_capability,true),
				'operational': (self._multi_operational,true),
			},
			'neighbor': {
				'static':      (self._multi_static,true),
				'flow':        (self._multi_flow,true),
				'l2vpn':       (self._multi_l2vpn,true),
				'process':     (self._multi_process,true),
				'family':      (self._multi_family,true),
				'capability':  (self._multi_capability,true),
				'operational': (self._multi_operational,true),
			},
			'static': {
				'route':       (self._multi_static_route,self.route.check_static_route),
			},
			'flow': {
				'route':       (self._multi_flow_route,self.flow.check_flow),
			},
			'l2vpn': {
				'vpls':       (self._multi_l2vpn_vpls,self.l2vpn.check_vpls),
			},
			'flow-route': {
				'match':       (self._multi_match,true),
				'then':        (self._multi_then,true),
			},
			'process': {
				'send':    (self._multi_api,true),
				'receive': (self._multi_api,true),
			}
		}

		self._command = {
			'group': {
				'description':   self.neighbor.description,
				'router-id':     self.neighbor.router_id,
				'host-name':     self.neighbor.hostname,
				'domain-name':   self.neighbor.domainname,
				'local-address': self.neighbor.ip,
				'local-as':      self.neighbor.asn,
				'peer-as':       self.neighbor.asn,
				'passive':       self.neighbor.passive,
				'listen':        self.neighbor.listen,
				'hold-time':     self.neighbor.holdtime,
				'md5':           self.neighbor.md5,
				'ttl-security':  self.neighbor.ttl,
				'group-updates': self.neighbor.groupupdate,
				'adj-rib-out':   self.neighbor.adjribout,
				'auto-flush':    self.neighbor.autoflush,
			},
			'neighbor': {
				'description':   self.neighbor.description,
				'router-id':     self.neighbor.router_id,
				'host-name':     self.neighbor.hostname,
				'domain-name':   self.neighbor.domainname,
				'local-address': self.neighbor.ip,
				'local-as':      self.neighbor.asn,
				'peer-as':       self.neighbor.asn,
				'passive':       self.neighbor.passive,
				'listen':        self.neighbor.listen,
				'hold-time':     self.neighbor.holdtime,
				'md5':           self.neighbor.md5,
				'ttl-security':  self.neighbor.ttl,
				'group-updates': self.neighbor.groupupdate,
				'adj-rib-out':   self.neighbor.adjribout,
				'auto-flush':    self.neighbor.autoflush,
			},
			'capability': {
				'route-refresh':    self.neighbor.capability.refresh,
				'graceful-restart': self.neighbor.capability.gracefulrestart,
				'multi-session':    self.neighbor.capability.multisession,
				'add-path':         self.neighbor.capability.addpath,
				'aigp':             self.neighbor.capability.aigp,
				'operational':      self.neighbor.capability.operational,
				'asn4':             self.neighbor.capability.asn4,
			},
			'process': {
				'run':              self.process.run,
				'encoder':          self.process.encoder,
				'neighbor-changes': self.process.command,
			},
			'family': {
				'ipv4':    self.family.ipv4,
				'ipv6':    self.family.ipv6,
				'l2vpn':   self.family.l2vpn,
				'minimal': self.family.minimal,
				'all':     self.family.all,
			},
			'static': {
				'route':   self.route.static,
			},
			'l2vpn': {
				'vpls':    self.l2vpn.vpls,
			},
			'operational': {
				'asm':     self.operational.asm,
				# it makes no sense to have adm or others
			},
			'static-route': self.route.command,
			# 'inet-route': {
			# 'mpls-route': {
			'l2vpn-vpls':   self.l2vpn.command,
			'flow-route': {
				'rd':                  self.route.rd,
				'route-distinguisher': self.route.rd,
				'next-hop':            self.flow.next_hop,
			},
			'flow-match': {
				'source':              self.flow.source,
				'source-ipv4':         self.flow.source,
				'destination':         self.flow.destination,
				'destination-ipv4':    self.flow.destination,
				'port':                self.flow.anyport,
				'source-port':         self.flow.source_port,
				'destination-port':    self.flow.destination_port,
				'protocol':            self.flow.protocol,
				'next-header':         self.flow.next_header,
				'tcp-flags':           self.flow.tcp_flags,
				'icmp-type':           self.flow.icmp_type,
				'icmp-code':           self.flow.icmp_code,
				'fragment':            self.flow.fragment,
				'dscp':                self.flow.dscp,
				'traffic-class':       self.flow.traffic_class,
				'packet-length':       self.flow.packet_length,
				'flow-label':          self.flow.flow_label,
			},
			'flow-then': {
				'accept':              self.flow.accept,
				'discard':             self.flow.discard,
				'rate-limit':          self.flow.rate_limit,
				'redirect':            self.flow.redirect,
				'redirect-to-nexthop': self.flow.redirect_next_hop,
				'copy':                self.flow.copy,
				'mark':                self.flow.mark,
				'action':              self.flow.action,
				'community':           self.route.community,
				'extended-community':  self.route.extended_community,
			},
			'send': {
				'parsed':              self.process.command,
				'packets':             self.process.command,
				'consolidate':         self.process.command,
				'open':                self.process.command,
				'update':              self.process.command,
				'notification':        self.process.command,
				'keepalive':           self.process.command,
				'refresh':             self.process.command,
				'operational':         self.process.command,
			},
			'receive': {
				'parsed':              self.process.command,
				'packets':             self.process.command,
				'consolidate':         self.process.command,
				'open':                self.process.command,
				'update':              self.process.command,
				'notification':        self.process.command,
				'keepalive':           self.process.command,
				'refresh':             self.process.command,
				'operational':         self.process.command,
			},
		}

		self._clear()

		self.processes = {}

		self._scope = []
		self._location = ['root']

	def _clear (self):
		self.processes = {}

		self._scope = []
		self._location = ['root']

		self.tokens.clear()
		self.error.clear()
		self.neighbor.clear()
		self.family.clear()
		self.process.clear()
		self.route.clear()
		self.flow.clear()
		self.l2vpn.clear()
		self.operational.clear()

	# Public Interface

	def reload (self):
		try:
			return self._reload()
		except KeyboardInterrupt:
			return self.error.set('configuration reload aborted by ^C or SIGINT')
		except Exception:
			# unhandled configuration parsing issue
			raise

	def _reload (self):
		# taking the first configuration available (FIFO buffer)
		fname = self._configurations.pop(0)
		self.process.configuration(fname)
		self._configurations.append(fname)

		# clearing the current configuration to be able to re-parse it
		self._clear()

		if not self.tokens.set_file(fname):
			return False

		# parsing the configuration
		r = False
		while not self.tokens.finished:
			r = self._dispatch(
				self._scope,'root','configuration',
				self._tree['configuration'].keys(),
				[]
			)
			if r is False:
				break

		# handling possible parsing errors
		if r not in [True,None]:
			# making sure nothing changed
			self.neighbor.cancel()
			return self.error.set(
				"\n"
				"syntax error in section %s\n"
				"line %d: %s\n"
				"\n%s" % (
					self._location[-1],
					self.tokens.number,
					' '.join(self.tokens.line),
					str(self.error)
				)
			)

		# installing in the neighbor the API routes
		self.neighbor.complete()

		# we are not really running the program, just want to ....
		if environment.settings().debug.route:
			from exabgp.configuration.current.check import check_message
			if check_message(self.neighbor.neighbors,environment.settings().debug.route):
				sys.exit(0)
			sys.exit(1)

		# we are not really running the program, just want check the configuration validity
		if environment.settings().debug.selfcheck:
			from exabgp.configuration.current.check import check_neighbor
			if check_neighbor(self.neighbor.neighbors):
				sys.exit(0)
			sys.exit(1)

		return True

	# name is not used yet but will come in really handy if we have a name collision :D
	def _dispatch (self, scope, name, command, multi, single, location=None):
		if location:
			self._location = location
			self.flow.clear()
		try:
			tokens = self.tokens.next()
		except IndexError:
			return self.error.set('configuration file incomplete (most likely missing })')
		self.logger.configuration("parsing | %-13s | '%s'" % (command,"' '".join(tokens)))
		end = tokens[-1]
		if multi and end == '{':
			self._location.append(tokens[0])
			return self._multi_line(scope,command,tokens[1],tokens[:-1],multi)
		if single and end == ';':
			return self.run(scope,command,tokens[1],tokens[:-1],single)
		if end == '}':
			if len(self._location) == 1:
				return self.error.set('closing too many parentheses')
			self._location.pop(-1)
			return None
		return False

	def _multi (self, tree, scope, name, command, tokens, valid):
		command = tokens[0]

		if valid and command not in valid:
			return self.error.set('option %s is not valid here' % command)

		if name not in tree:
			return self.error.set('option %s is not allowed here' % name)

		run, validate = tree[name].get(command,(false,false))
		if not run(scope,name,command,tokens[1:]):
			return False
		if not validate(scope,self):
			return False
		return True

	def _multi_line (self, scope, name, command, tokens, valid):
		return self._multi(self._tree,scope,name,command,tokens,valid)

	# Programs used to control exabgp

	def _multi_process (self, scope, name, command, tokens):
		while True:
			r = self._dispatch(
				scope,name,'process',
				['send','receive'],
				[
					'run','encoder',
					'neighbor-changes',
				]
			)
			if r is False:
				return False
			if r is None:
				break

		name = tokens[0] if len(tokens) >= 1 else 'conf-only-%s' % str(time.time())[-6:]
		self.processes.setdefault(name,{})['neighbor'] = scope[-1]['peer-address'] if 'peer-address' in scope[-1] else '*'

		for key in ['neighbor-changes',]:
			self.processes[name][key] = scope[-1].pop(key,False)

		for direction in ['send','receive']:
			for action in ['packets','parsed','consolidate']:
				key = '%s-%s' % (direction,action)
				self.processes[name][key] = scope[-1].pop(key,False)

			for message in Message.CODE.MESSAGES:
				key = '%s-%d' % (direction,message)
				self.processes[name][key] = scope[-1].pop(key,False)

		run = scope[-1].pop('run','')
		if run:
			if len(tokens) != 1:
				return self.error.set(self.process.syntax)

			self.processes[name]['encoder'] = scope[-1].get('encoder','') or self.api_encoder
			self.processes[name]['run'] = run
			return True
		elif len(tokens):
			return self.error.set(self.process.syntax)

	# Limit the AFI/SAFI pair announced to peers

	def _multi_family (self, scope, name, command, tokens):
		# we know all the families we should use
		scope[-1]['families'] = []
		while True:
			r = self._dispatch(
				scope,name,'family',
				[],
				self._command['family'].keys()
			)
			if r is False:
				return False
			if r is None:
				break
		self.family.clear()
		return True

	# capability

	def _multi_capability (self, scope, name, command, tokens):
		# we know all the families we should use
		while True:
			r = self._dispatch(
				scope,name,'capability',
				[],
				self._command['capability'].keys()
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	# route grouping with watchdog

	# Group Neighbor

	def _multi_group (self, scope, name, command, address):
		# if len(tokens) != 2:
		# 	return self.error.set('syntax: group <name> { <options> }')

		scope.append({})
		while True:
			r = self._dispatch(
				scope,name,'group',
				[
					'static','flow','l2vpn',
					'neighbor','process','family',
					'capability','operational'
				],
				self._command['neighbor'].keys()
			)
			if r is False:
				return False
			if r is None:
				scope.pop(-1)
				return True

	def _multi_neighbor (self, scope, name, command, tokens):
		if len(tokens) != 1:
			return self.error.set('syntax: neighbor <ip> { <options> }')

		address = tokens[0]
		scope.append({})
		try:
			scope[-1]['peer-address'] = IP.create(address)
		except (IndexError,ValueError,socket.error):
			return self.error.set('"%s" is not a valid IP address' % address)

		while True:
			r = self._dispatch(
				scope,name,'neighbor',
				[
					'static','flow','l2vpn',
					'process','family','capability','operational'
				],
				self._command['neighbor']
			)
			# XXX: THIS SHOULD ALLOW CAPABILITY AND NOT THE INDIVIDUAL SUB KEYS
			if r is False:
				return False
			if r is None:
				return True

	#  Group Static ................

	def _multi_static (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set('syntax: static { route; route; ... }')

		while True:
			r = self._dispatch(
				scope,name,'static',
				['route',],
				['route',]
			)
			if r is False:
				return False
			if r is None:
				return True

	# Group Route  ........

	def _multi_static_route (self, scope, name, command, tokens):
		if len(tokens) != 1:
			return self.error.set(self.route.syntax)

		if not self.route.insert_static_route(scope,name,command,tokens):
			return False

		while True:
			r = self._dispatch(
				scope,name,'static-route',
				self._command['static-route'].keys(),
				self._command['static-route'].keys()
			)
			if r is False:
				return False
			if r is None:
				return self.route.make_split(scope)

	def _multi_l2vpn (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set(self.l2vpn.syntax)

		while True:
			r = self._dispatch(
				scope,name,'l2vpn',
				['vpls',],
				['vpls',]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	def _multi_l2vpn_vpls (self, scope, name, command, tokens):
		if len(tokens) > 1:
			return self.error.set(self.l2vpn.syntax)

		if not self.l2vpn.insert_vpls(scope,name,command,tokens):
			return False

		while True:
			r = self._dispatch(
				scope,name,'l2vpn-vpls',
				self._command['l2vpn-vpls'].keys(),
				self._command['l2vpn-vpls'].keys()
			)
			if r is False:
				return False
			if r is None:
				break

		return True


	def _multi_flow (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		while True:
			r = self._dispatch(
				scope,name,'flow',
				['route',],
				[]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	def _insert_flow_route (self, scope, name, command, tokens=None):
		if self.flow.state != 'out':
			return self.error.set(self.flow.syntax)

		self.flow.state = 'match'

		try:
			attributes = Attributes()
			attributes[Attribute.CODE.EXTENDED_COMMUNITY] = ExtendedCommunities()
			flow = Change(Flow(),attributes)
		except ValueError:
			return self.error.set(self.flow.syntax)

		if 'announce' not in scope[-1]:
			scope[-1]['announce'] = []

		scope[-1]['announce'].append(flow)
		return True

	def _multi_flow_route (self, scope, name, command, tokens):
		if len(tokens) > 1:
			return self.error.set(self.flow.syntax)

		if not self._insert_flow_route(scope,name,command):
			return False

		while True:
			r = self._dispatch(
				scope,name,'flow-route',
				['match','then'],
				['rd','route-distinguisher','next-hop']
			)
			if r is False:
				return False
			if r is None:
				break

		if self.flow.state != 'out':
			return self.error.set(self.flow.syntax)

		return True

	# ..........................................

	def _multi_match (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		if self.flow.state != 'match':
			return self.error.set(self.flow.syntax)

		self.flow.state = 'then'

		while True:
			r = self._dispatch(
				scope,name,'flow-match',
				[],
				[
					'source','destination',
					'source-ipv4','destination-ipv4',
					'port','source-port','destination-port',
					'protocol','next-header','tcp-flags','icmp-type','icmp-code',
					'fragment','dscp','traffic-class','packet-length','flow-label'
				]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	def _multi_then (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set(self.flow.syntax)

		if self.flow.state != 'then':
			return self.error.set(self.flow.syntax)

		self.flow.state = 'out'

		while True:
			r = self._dispatch(
				scope,name,'flow-then',
				[],
				[
					'accept','discard','rate-limit',
					'redirect','copy','redirect-to-nexthop',
					'mark','action',
					'community','extended-community'
				]
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	# ..........................................

	def _multi_api (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set('api issue')

		while True:
			r = self._dispatch(
				scope,name,command,
				[],
				self._command[command].keys()
			)
			if r is False:
				return False
			if r is None:
				break
		return True

	#  Group Operational ................

	def _multi_operational (self, scope, name, command, tokens):
		if len(tokens) != 0:
			return self.error.set('syntax: operational { command; command; ... }')

		while True:
			r = self._dispatch(
				scope,name,command,
				[],
				self._command[command].keys()
			)
			if r is False:
				return False
			if r is None:
				return True

	def run (self, scope, name, command, tokens, valid):
		command = tokens[0]
		if valid and command not in valid:
			return self.error.set('invalid keyword "%s"' % command)

		family = {
			'static-route': {
				'rd': SAFI.mpls_vpn,
				'route-distinguisher': SAFI.mpls_vpn,
			},
			'l2vpn-vpls': {
				'rd': SAFI.vpls,
				'route-distinguisher': SAFI.vpls,
			},
			'flow-route': {
				'rd': SAFI.flow_vpn,
				'route-distinguisher': SAFI.flow_vpn,
			}
		}

		if name in self._command:
			if command in self._command[name]:
				if command in family.get(name,{}):
					return self._command[name][command](scope,name,command,tokens[1:],family[name][command])
				return self._command[name][command](scope,name,command,tokens[1:])

		return self.error.set('command not known %s' % command)
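
The _tree and _command tables above drive the parser: a statement whose last token is '{' opens a section handled by a _multi_* method, a statement ending in ';' is a single command, and '}' closes the current scope. The sketch below illustrates only that brace/semicolon dispatch, using nested dicts instead of ExaBGP's scope list and skipping the per-keyword validation the real class performs; the grammar in the example is made up.

def parse(statements):
    # statements is a list of token lists, e.g. ['neighbor', '192.0.2.1', '{']
    scope = {}
    while statements:
        tokens = statements.pop(0)
        end = tokens[-1]
        if end == '{':
            # a section: recurse until the matching '}'
            scope[' '.join(tokens[:-1])] = parse(statements)
        elif end == ';':
            # a single command: keyword followed by its arguments
            scope[tokens[0]] = ' '.join(tokens[1:-1])
        elif end == '}':
            return scope
        else:
            raise ValueError('statement must end with "{", "}" or ";": %s' % ' '.join(tokens))
    return scope


example = [
    ['neighbor', '192.0.2.1', '{'],
    ['description', 'test-peer', ';'],
    ['local-as', '65000', ';'],
    ['}'],
]
print(parse(example))
# {'neighbor 192.0.2.1': {'description': 'test-peer', 'local-as': '65000'}}
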
Code example #16
File: bgp.py  Project: qoke/exabgp
def main():
    options = docopt.docopt(usage, help=False)

    major = int(sys.version[0])
    minor = int(sys.version[2])

    if major != 2 or minor < 5:
        sys.exit(
            'This program can not work (is not tested) with your python version (< 2.5 or >= 3.0)'
        )

    if options["--version"]:
        print('ExaBGP : %s' % version)
        print('Python : %s' % sys.version.replace('\n', ' '))
        print('Uname  : %s' % ' '.join(platform.uname()[:5]))
        sys.exit(0)

    if options["--folder"]:
        folder = os.path.realpath(os.path.normpath(options["--folder"]))
    elif sys.argv[0].endswith('/bin/exabgp'):
        folder = sys.argv[0][:-len('/bin/exabgp')] + '/etc/exabgp'
    elif sys.argv[0].endswith('/sbin/exabgp'):
        folder = sys.argv[0][:-len('/sbin/exabgp')] + '/etc/exabgp'
    else:
        folder = '/etc/exabgp'

    os.environ['EXABGP_ETC'] = folder  # This is not the prettiest

    if options["--run"]:
        sys.argv = sys.argv[sys.argv.index('--run') + 1:]
        if sys.argv[0] == 'healthcheck':
            from exabgp.application import run_healthcheck
            run_healthcheck()
        elif sys.argv[0] == 'cli':
            from exabgp.application import run_cli
            run_cli()
        else:
            print(usage)
            sys.exit(0)
        return

    envfile = 'exabgp.env' if not options["--env"] else options["--env"]
    if not envfile.startswith('/'):
        envfile = '%s/%s' % (folder, envfile)

    from exabgp.configuration.setup import environment

    try:
        env = environment.setup(envfile)
    except environment.Error as exc:
        print(usage)
        print('\nconfiguration issue,', str(exc))
        sys.exit(1)

    # Must be done before setting the logger as it modifies its behaviour

    if options["--debug"]:
        env.log.all = True
        env.log.level = syslog.LOG_DEBUG

    logger = Logger()

    named_pipe = os.environ.get('NAMED_PIPE', '')
    if named_pipe:
        from exabgp.application.control import main as control
        control(named_pipe)
        sys.exit(0)

    if options["--decode"]:
        decode = ''.join(options["--decode"]).replace(':', '').replace(' ', '')
        if not is_bgp(decode):
            print(usage)
            print('Environment values are:\n' +
                  '\n'.join(' - %s' % _ for _ in environment.default()))
            print("")
            print("The BGP message must be a hexadecimal string.")
            print("")
            print("All colons or spaces are ignored, for example:")
            print("")
            print("  --decode 001E0200000007900F0003000101")
            print("  --decode 001E:02:0000:0007:900F:0003:0001:01")
            print(
                "  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101"
            )
            print(
                "  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01"
            )
            print(
                "  --decode 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101'"
            )
            sys.exit(1)
    else:
        decode = ''

    # Make sure our child has a named pipe name
    if env.api.file:
        os.environ['NAMED_PIPE'] = env.api.file

    duration = options["--signal"]
    if duration and duration.isdigit():
        pid = os.fork()
        if pid:
            import time
            import signal
            try:
                time.sleep(int(duration))
                os.kill(pid, signal.SIGUSR1)
            except KeyboardInterrupt:
                pass
            try:
                pid, code = os.wait()
                sys.exit(code)
            except KeyboardInterrupt:
                try:
                    pid, code = os.wait()
                    sys.exit(code)
                except Exception:
                    sys.exit(0)

    if options["--help"]:
        print(usage)
        print('Environment values are:\n' +
              '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.exit(0)

    if options["--decode"]:
        env.log.parser = True
        env.debug.route = decode
        env.tcp.bind = ''

    if options["--profile"]:
        env.profile.enable = True
        if options["--profile"].lower() in ['1', 'true']:
            env.profile.file = True
        elif options["--profile"].lower() in ['0', 'false']:
            env.profile.file = False
        else:
            env.profile.file = options["--profile"]

    if envfile and not os.path.isfile(envfile):
        comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
    else:
        comment = ''

    if options["--full-ini"] or options["--fi"]:
        for line in environment.iter_ini():
            print(line)
        sys.exit(0)

    if options["--full-env"] or options["--fe"]:
        print()
        for line in environment.iter_env():
            print(line)
        sys.exit(0)

    if options["--diff-ini"] or options["--di"]:
        for line in environment.iter_ini(True):
            print(line)
        sys.exit(0)

    if options["--diff-env"] or options["--de"]:
        for line in environment.iter_env(True):
            print(line)
        sys.exit(0)

    if options["--once"]:
        env.tcp.once = True

    if options["--pdb"]:
        # The following may fail on old versions of python (but is required for debug.py)
        os.environ['PDB'] = 'true'
        env.debug.pdb = True

    if options["--test"]:
        env.debug.selfcheck = True
        env.log.parser = True

    if options["--memory"]:
        env.debug.memory = True

    configurations = []
    # check the files only once we have parsed all the command line options and allowed them to run
    if options["<configuration>"]:
        for f in options["<configuration>"]:
            normalised = os.path.realpath(os.path.normpath(f))
            if os.path.isfile(normalised):
                configurations.append(normalised)
                continue
            if f.startswith('etc/exabgp'):
                normalised = os.path.join(folder, f[11:])
                if os.path.isfile(normalised):
                    configurations.append(normalised)
                    continue

            logger.configuration(
                'one of the arguments passed as configuration is not a file (%s)'
                % f, 'error')
            sys.exit(1)

    else:
        print(usage)
        print('Environment values are:\n' +
              '\n'.join(' - %s' % _ for _ in environment.default()))
        print('\nno configuration file provided')
        sys.exit(1)

    from exabgp.bgp.message.update.attribute import Attribute
    Attribute.caching = env.cache.attributes

    if env.debug.rotate or len(configurations) == 1:
        run(env, comment, configurations)

    if not (env.log.destination in ('syslog', 'stdout', 'stderr')
            or env.log.destination.startswith('host:')):
        logger.configuration(
            'can not log to files when running multiple configurations (as we fork)',
            'error')
        sys.exit(1)

    try:
        # run each configuration in its own process
        pids = []
        for configuration in configurations:
            pid = os.fork()
            if pid == 0:
                run(env, comment, [configuration], os.getpid())
            else:
                pids.append(pid)

        # If we get a ^C / SIGTERM, ignore it and just continue waiting for our child processes
        import signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # wait for the forked processes
        for pid in pids:
            os.waitpid(pid, 0)
    except OSError as exc:
        logger.reactor(
            'Can not fork, errno %d : %s' % (exc.errno, exc.strerror),
            'critical')
        sys.exit(1)
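
main() only strips colons and spaces from the --decode argument before handing it to is_bgp(); the printed examples show the accepted spellings. The helper below sketches that normalisation together with a plausible hexadecimal check. is_bgp() itself is not part of this listing, so the validation here is an assumption rather than ExaBGP's actual test.

import string


def normalise_payload(argument):
    # colons and spaces are ignored, as in the printed --decode examples
    return argument.replace(':', '').replace(' ', '')


def looks_like_hex(payload):
    # assumed check: non-empty, even length, hexadecimal characters only
    return bool(payload) and len(payload) % 2 == 0 and \
        all(character in string.hexdigits for character in payload)


payload = normalise_payload('001E:02:0000:0007:900F:0003:0001:01')
print(payload)                  # 001E0200000007900F0003000101
print(looks_like_hex(payload))  # True
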
Code example #17
File: api.py  Project: Shmuma/exabgp
class API (object):
	callback = {
		'text': {},
		'json': {},
	}

	# need to sort and reverse, in order for the shorter command not to be used by error
	# "show neighbor" should not match "show neighbors"
	functions = sorted([
		'withdraw watchdog',
		'withdraw vpls',
		'withdraw route',
		'withdraw flow',
		'withdraw attribute',
		'version',
		'teardown',
		'shutdown',
		'show routes extensive',
		'show routes',
		'show neighbors',
		'show neighbor',
		'restart',
		'reload',
		'flush route',
		'announce watchdog',
		'announce vpls',
		'announce route-refresh',
		'announce route',
		'announce flow',
		'announce eor',
		'announce attribute',
		'announce operational',
	],reverse=True)

	def __init__ (self,reactor):
		self.reactor = reactor
		self.logger = Logger()
		self.parser = Parser.Text()

		try:
			for name in self.functions:
				self.callback['text'][name] = Command.Text.callback[name]
		except KeyError:
			raise RuntimeError('The code does not have an implementation for "%s", please code it !' % name)

	def text (self, reactor, service, command):
		for registered in self.functions:
			if registered in command:
				self.logger.reactor("callback | handling '%s' with %s" % (command,self.callback['text'][registered].func_name),'warning')
				# XXX: should we not test the return value ?
				self.callback['text'][registered](self,reactor,service,command)
				return True
		self.logger.reactor("Command from process not understood : %s" % command,'warning')
		return False

	def change_to_peers (self, change, peers):
		neighbors = self.reactor.configuration.neighbor.neighbors
		result = True
		for neighbor in neighbors:
			if neighbor in peers:
				if change.nlri.family() in neighbors[neighbor].families():
					neighbors[neighbor].rib.outgoing.insert_announced(change)
				else:
					self.logger.configuration('the route family is not configured on neighbor','error')
					result = False
		return result

	def eor_to_peers (self, family, peers):
		neighbors = self.reactor.configuration.neighbor.neighbors
		result = False
		for neighbor in neighbors:
			if neighbor in peers:
				result = True
				neighbors[neighbor].eor.append(family)
		return result

	def operational_to_peers (self, operational, peers):
		neighbors = self.reactor.configuration.neighbor.neighbors
		result = True
		for neighbor in neighbors:
			if neighbor in peers:
				if operational.family() in neighbors[neighbor].families():
					if operational.name == 'ASM':
						neighbors[neighbor].asm[operational.family()] = operational
					neighbors[neighbor].messages.append(operational)
				else:
					self.logger.configuration('the route family is not configured on neighbor','error')
					result = False
		return result

	def refresh_to_peers (self, refresh, peers):
		neighbors = self.reactor.configuration.neighbor.neighbors
		result = True
		for neighbor in neighbors:
			if neighbor in peers:
				family = (refresh.afi,refresh.safi)
				if family in neighbors[neighbor].families():
					neighbors[neighbor].refresh.append(refresh.__class__(refresh.afi,refresh.safi))
				else:
					result = False
		return result

	def shutdown (self):
		self.reactor.api_shutdown()
		return True

	def reload (self):
		self.reactor.api_reload()
		return True

	def restart (self):
		self.reactor.api_restart()
		return True
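
Sorting functions in reverse guarantees that when one command string is a prefix of another the longer one is tested first, so 'show neighbors' is matched before 'show neighbor'. The sketch below shows that longest-match dispatch in isolation, with a toy callback table standing in for Command.Text.callback.

FUNCTIONS = sorted(['show neighbor', 'show neighbors', 'announce route'], reverse=True)

# hypothetical callbacks standing in for Command.Text.callback
CALLBACKS = dict((name, (lambda name=name: 'handled by %s' % name)) for name in FUNCTIONS)


def dispatch(command):
    for registered in FUNCTIONS:
        if registered in command:
            return CALLBACKS[registered]()
    return 'command not understood: %s' % command


print(dispatch('show neighbors json'))                           # handled by show neighbors
print(dispatch('announce route 10.0.0.0/8 next-hop 192.0.2.1'))  # handled by announce route
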
Code example #18
File: api.py  Project: vincentbernat/exabgp
class API(object):
    callback = {
        'text': {},
        'json': {},
    }

    # need to sort and reverse, in order for the shorter command not to be used by error
    # "show neighbor" should not match "show neighbors"
    functions = sorted([
        'withdraw watchdog',
        'withdraw vpls',
        'withdraw route',
        'withdraw flow',
        'withdraw attribute',
        'version',
        'teardown',
        'shutdown',
        'show routes extensive',
        'show routes',
        'show neighbors',
        'show neighbor',
        'restart',
        'reload',
        'flush route',
        'announce watchdog',
        'announce vpls',
        'announce route-refresh',
        'announce route',
        'announce flow',
        'announce eor',
        'announce attribute',
        'announce operational',
    ],
                       reverse=True)

    def __init__(self, reactor):
        self.reactor = reactor
        self.logger = Logger()
        self.parser = Parser.Text()

        try:
            for name in self.functions:
                self.callback['text'][name] = Command.Text.callback[name]
        except KeyError:
            raise RuntimeError(
                'The code does not have an implementation for "%s", please code it !'
                % name)

    def text(self, reactor, service, command):
        for registered in self.functions:
            if registered in command:
                self.logger.reactor(
                    "callback | handling '%s' with %s" %
                    (command, self.callback['text'][registered].func_name),
                    'warning')
                # XXX: should we not test the return value ?
                self.callback['text'][registered](self, reactor, service,
                                                  command)
                return True
        self.logger.reactor(
            "Command from process not understood : %s" % command, 'warning')
        return False

    def change_to_peers(self, change, peers):
        neighbors = self.reactor.configuration.neighbor.neighbors
        result = True
        for neighbor in neighbors:
            if neighbor in peers:
                if change.nlri.family() in neighbors[neighbor].families():
                    neighbors[neighbor].rib.outgoing.insert_announced(change)
                else:
                    self.logger.configuration(
                        'the route family is not configured on neighbor',
                        'error')
                    result = False
        return result

    def eor_to_peers(self, family, peers):
        neighbors = self.reactor.configuration.neighbor.neighbors
        result = False
        for neighbor in neighbors:
            if neighbor in peers:
                result = True
                neighbors[neighbor].eor.append(family)
        return result

    def operational_to_peers(self, operational, peers):
        neighbors = self.reactor.configuration.neighbor.neighbors
        result = True
        for neighbor in neighbors:
            if neighbor in peers:
                if operational.family() in neighbors[neighbor].families():
                    if operational.name == 'ASM':
                        neighbors[neighbor].asm[
                            operational.family()] = operational
                    neighbors[neighbor].messages.append(operational)
                else:
                    self.logger.configuration(
                        'the route family is not configured on neighbor',
                        'error')
                    result = False
        return result

    def refresh_to_peers(self, refresh, peers):
        neighbors = self.reactor.configuration.neighbor.neighbors
        result = True
        for neighbor in neighbors:
            if neighbor in peers:
                family = (refresh.afi, refresh.safi)
                if family in neighbors[neighbor].families():
                    neighbors[neighbor].refresh.append(
                        refresh.__class__(refresh.afi, refresh.safi))
                else:
                    result = False
        return result

    def shutdown(self):
        self.reactor.api_shutdown()
        return True

    def reload(self):
        self.reactor.api_reload()
        return True

    def restart(self):
        self.reactor.api_restart()
        return True
Code example #19
File: bgp.py  Project: CadeLaRen/exabgp
	configurations = []
	# check the files only once we have parsed all the command line options and allowed them to run
	if options["<configuration>"]:
		for f in options["<configuration>"]:
			normalised = os.path.realpath(os.path.normpath(f))
			if os.path.isfile(normalised):
				configurations.append(normalised)
				continue
			if f.startswith('etc/exabgp'):
				normalised = os.path.join(folder,f[11:])
				if os.path.isfile(normalised):
					configurations.append(normalised)
					continue

			logger.configuration('one of the arguments passed as configuration is not a file (%s)' % f,'error')
			sys.exit(1)

	else:
		print(usage)
		print('Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default()))
		print('\nno configuration file provided')
		sys.exit(1)

	from exabgp.bgp.message.update.attribute import Attribute
	Attribute.caching = env.cache.attributes

	if env.debug.rotate or len(configurations) == 1:
		run(env,comment,configurations)

	if not (env.log.destination in ('syslog','stdout','stderr') or env.log.destination.startswith('host:')):
Code example #20
File: loop.py  Project: szhong-jnpr/exabgp
class Reactor (object):
	# [hex(ord(c)) for c in os.popen('clear').read()]
	clear = b''.join([chr_(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])

	def __init__ (self, configurations):
		self.ip = environment.settings().tcp.bind
		self.port = environment.settings().tcp.port
		self.respawn = environment.settings().api.respawn

		self.max_loop_time = environment.settings().reactor.speed
		self.early_drop = environment.settings().daemon.drop

		self.logger = Logger()
		self.daemon = Daemon(self)
		self.processes = None
		self.listener = None
		self.configuration = Configuration(configurations)
		self.api = API(self)

		self.peers = {}
		self.route_update = False

		self._stopping = environment.settings().tcp.once
		self._shutdown = False
		self._reload = False
		self._reload_processes = False
		self._restart = False
		self._saved_pid = False
		self._pending = deque()
		self._running = None

		signal.signal(signal.SIGTERM, self.sigterm)
		signal.signal(signal.SIGHUP, self.sighup)
		signal.signal(signal.SIGALRM, self.sigalrm)
		signal.signal(signal.SIGUSR1, self.sigusr1)
		signal.signal(signal.SIGUSR2, self.sigusr2)

	def sigterm (self, signum, frame):
		self.logger.reactor('SIG TERM received - shutdown')
		self._shutdown = True

	def sighup (self, signum, frame):
		self.logger.reactor('SIG HUP received - shutdown')
		self._shutdown = True

	def sigalrm (self, signum, frame):
		self.logger.reactor('SIG ALRM received - restart')
		self._restart = True

	def sigusr1 (self, signum, frame):
		self.logger.reactor('SIG USR1 received - reload configuration')
		self._reload = True

	def sigusr2 (self, signum, frame):
		self.logger.reactor('SIG USR2 received - reload configuration and processes')
		self._reload = True
		self._reload_processes = True

	def ready (self, sockets, ios, sleeptime=0):
		# never sleep a negative number of seconds (if the rounding is negative somewhere)
		# never sleep more than one second (should the clock change between two time.time calls)
		sleeptime = min(max(0.0,sleeptime),1.0)
		if not ios:
			time.sleep(sleeptime)
			return []
		try:
			read,_,_ = select.select(sockets+ios,[],[],sleeptime)
			return read
		except select.error as exc:
			errno,message = exc.args  # pylint: disable=W0633
			if errno not in error.block:
				raise exc
			return []
		except socket.error as exc:
			if exc.errno in error.fatal:
				raise exc
			return []

	def run (self):
		self.daemon.daemonise()

		# Make sure we create processes once we have closed the file descriptors
		# unfortunately, this must be done before reading the configuration file
		# so we can not do it with dropped privileges
		self.processes = Processes(self)

		# we have to read the configuration possibly with root privileges
		# as we need the MD5 information when we bind, and root is needed
		# to bind to a port < 1024

		# this is undesirable as :
		# - handling user generated data as root should be avoided
		# - we may not be able to reload the configuration once the privileges are dropped

		# but I can not see any way to avoid it

		if not self.load():
			return False

		try:
			self.listener = Listener()

			if self.ip:
				self.listener.listen(IP.create(self.ip),IP.create('0.0.0.0'),self.port,None,None)
				self.logger.reactor('Listening for BGP session(s) on %s:%d' % (self.ip,self.port))

			for neighbor in self.configuration.neighbors.values():
				if neighbor.listen:
					self.listener.listen(neighbor.md5_ip,neighbor.peer_address,neighbor.listen,neighbor.md5_password,neighbor.ttl_in)
					self.logger.reactor('Listening for BGP session(s) on %s:%d%s' % (neighbor.md5_ip,neighbor.listen,' with MD5' if neighbor.md5_password else ''))
		except NetworkError as exc:
			self.listener = None
			if os.geteuid() != 0 and self.port <= 1024:
				self.logger.reactor('Can not bind to %s:%d, you may need to run ExaBGP as root' % (self.ip,self.port),'critical')
			else:
				self.logger.reactor('Can not bind to %s:%d (%s)' % (self.ip,self.port,str(exc)),'critical')
			self.logger.reactor('unset exabgp.tcp.bind if you do not want to listen for incoming connections','critical')
			self.logger.reactor('and check that no other daemon is already binding to port %d' % self.port,'critical')
			sys.exit(1)

		if not self.early_drop:
			self.processes.start()

		if not self.daemon.drop_privileges():
			self.logger.reactor('Could not drop privileges to \'%s\', refusing to run as root' % self.daemon.user,'critical')
			self.logger.reactor('Set the environment value exabgp.daemon.user to change the unprivileged user','critical')
			return

		if self.early_drop:
			self.processes.start()

		# This is required to make sure we can write in the log location as we now have dropped root privileges
		if not self.logger.restart():
			self.logger.reactor('Could not setup the logger, aborting','critical')
			return

		if not self.daemon.savepid():
			return

		# did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
		reload_completed = True

		wait = environment.settings().tcp.delay
		if wait:
			sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
			self.logger.reactor('waiting for %d seconds before connecting' % sleeptime)
			time.sleep(float(sleeptime))

		workers = {}
		peers = set()
		scheduled = False

		while True:
			try:
				finished = False
				start = time.time()
				end = start + self.max_loop_time

				if self._shutdown:
					self._shutdown = False
					self.shutdown()
					break

				if self._reload and reload_completed:
					self._reload = False
					self.load()
					self.processes.start(self._reload_processes)
					self._reload_processes = False
				elif self._restart:
					self._restart = False
					self.restart()

				# We got some API routes to announce
				if self.route_update:
					self.route_update = False
					self.route_send()

				for peer in self.peers.keys():
					peers.add(peer)

				while start < time.time() < end and not finished:
					if self.peers:
						for key in list(peers):
							peer = self.peers[key]
							action = peer.run()

							# .run() returns an ACTION enum:
							# * immediate if it wants to be called again
							# * later if it should be called again but has no work atm
							# * close if it is finished and is closing down, or restarting
							if action == ACTION.CLOSE:
								self.unschedule(peer)
								peers.discard(key)
							# we are losing this peer, no point in scheduling more work for it
							elif action == ACTION.LATER:
								for io in peer.sockets():
									workers[io] = key
								# no need to come back to it before a full cycle
								peers.discard(key)

					if not peers:
						reload_completed = True

					if self.listener:
						for connection in self.listener.connected():
							# found
							# * False, no peer found for this TCP connection
							# * True, peer found
							# * None, conflict found for this TCP connection
							found = False
							for key in self.peers:
								peer = self.peers[key]
								neighbor = peer.neighbor
								# XXX: FIXME: Inet can only be compared to Inet
								if connection.local == str(neighbor.peer_address) and connection.peer == str(neighbor.local_address):
									if peer.incoming(connection):
										found = True
										break
									found = None
									break

							if found:
								self.logger.reactor('accepted connection from  %s - %s' % (connection.local,connection.peer))
							elif found is False:
								self.logger.reactor('no session configured for  %s - %s' % (connection.local,connection.peer))
								connection.notification(6,3,'no session configured for the peer')
								connection.close()
							elif found is None:
								self.logger.reactor('connection refused (already connected to the peer) %s - %s' % (connection.local,connection.peer))
								connection.notification(6,5,'could not accept the connection')
								connection.close()

					scheduled = self.schedule()
					finished = not peers and not scheduled

				# The RFC states that we MUST NOT send more than one KEEPALIVE per second
				# and sending them less often could cause the session to drop

				if finished:
					for io in self.ready(list(peers),self.processes.fds(),end-time.time()):
						if io in workers:
							peers.add(workers[io])
							del workers[io]

				if self._stopping and not self.peers.keys():
					break

			except KeyboardInterrupt:
				while True:
					try:
						self._shutdown = True
						self.logger.reactor('^C received')
						break
					except KeyboardInterrupt:
						pass
			# socket.error is a subclass of IOError (so catch it first)
			except socket.error:
				try:
					self._shutdown = True
					self.logger.reactor('socket error received','warning')
					break
				except KeyboardInterrupt:
					pass
			except IOError:
				while True:
					try:
						self._shutdown = True
						self.logger.reactor('I/O Error received, most likely ^C during IO','warning')
						break
					except KeyboardInterrupt:
						pass
			except SystemExit:
				try:
					self._shutdown = True
					self.logger.reactor('exiting')
					break
				except KeyboardInterrupt:
					pass
			except ProcessError:
				try:
					self._shutdown = True
					self.logger.reactor('Problem when sending message(s) to helper program, stopping','error')
				except KeyboardInterrupt:
					pass
			except select.error:
				try:
					self._shutdown = True
					self.logger.reactor('problem using select, stopping','error')
				except KeyboardInterrupt:
					pass
				# from exabgp.leak import objgraph
				# print objgraph.show_most_common_types(limit=20)
				# import random
				# obj = objgraph.by_type('Route')[random.randint(0,2000)]
				# objgraph.show_backrefs([obj], max_depth=10)

	def shutdown (self):
		"""terminate all the current BGP connections"""
		self.logger.reactor('performing shutdown')
		if self.listener:
			self.listener.stop()
			self.listener = None
		for key in self.peers.keys():
			self.peers[key].stop()
		self.processes.terminate()
		self.daemon.removepid()
		self._stopping = True

	def load (self):
		"""reload the configuration and send the routes which changed to the peers"""
		self.logger.reactor('performing reload of exabgp %s' % version)

		reloaded = self.configuration.reload()

		if not reloaded:
			#
			# Careful: the string below is used by the QA code to check for success or failure
			self.logger.configuration('problem with the configuration file, no change done','error')
			# Careful: the string above is used by the QA code to check for success or failure
			#
			self.logger.configuration(str(self.configuration.error),'error')
			return False

		for key, peer in self.peers.items():
			if key not in self.configuration.neighbors:
				self.logger.reactor('removing peer: %s' % peer.neighbor.name())
				peer.stop()

		for key, neighbor in self.configuration.neighbors.items():
			# new peer
			if key not in self.peers:
				self.logger.reactor('new peer: %s' % neighbor.name())
				peer = Peer(neighbor,self)
				self.peers[key] = peer
			# modified peer
			elif self.peers[key].neighbor != neighbor:
				self.logger.reactor('peer definition change, establishing a new connection for %s' % str(key))
				self.peers[key].reestablish(neighbor)
			# same peer but perhaps not the routes
			else:
				# finding what route changed and sending the delta is not obvious
				self.logger.reactor('peer definition identical, updating peer routes if required for %s' % str(key))
				self.peers[key].reconfigure(neighbor)
		self.logger.configuration('loaded new configuration successfully','info')

		return True

	def schedule (self):
		try:
			# read at least one message per process if there are any and parse them
			for service,command in self.processes.received():
				self.api.text(self,service,command)

			# if we have nothing to do, return or save the work
			if not self._running:
				if not self._pending:
					return False
				self._running,name = self._pending.popleft()
				self.logger.reactor('callback | installing %s' % name)

			if self._running:
				# run it
				try:
					self.logger.reactor('callback | running')
					six.next(self._running)  # run
					# should raise StopIteration in most cases
					# and prevents us from having to run twice to complete one command
					six.next(self._running)  # run
				except StopIteration:
					self._running = None
					self.logger.reactor('callback | removing')
				return True

		except StopIteration:
			pass
		except KeyboardInterrupt:
			self._shutdown = True
			self.logger.reactor('^C received','error')

	def route_send (self):
		"""the process ran and we need to figure out what routes to change"""
		self.logger.reactor('performing dynamic route update')
		for key in self.configuration.neighbors.keys():
			self.peers[key].send_new()
		self.logger.reactor('updated peers dynamic routes successfully')

	def restart (self):
		"""kill the BGP session and restart it"""
		self.logger.reactor('performing restart of exabgp %s' % version)
		self.configuration.reload()

		for key in self.peers.keys():
			if key not in self.configuration.neighbors.keys():
				neighbor = self.peers[key].neighbor
				self.logger.reactor('removing Peer %s' % neighbor.name())
				self.peers[key].stop()
			else:
				self.peers[key].reestablish()
		self.processes.terminate()
		self.processes.start()

	def unschedule (self, peer):
		key = peer.neighbor.name()
		if key in self.peers:
			del self.peers[key]

	def answer (self, service, string):
		self.processes.write(service,string)
		self.logger.reactor('responding to %s : %s' % (service,string.replace('\n','\\n')))

	def api_shutdown (self):
		self._shutdown = True
		self._pending = deque()
		self._running = None

	def api_reload (self):
		self._reload = True
		self._pending = deque()
		self._running = None

	def api_restart (self):
		self._restart = True
		self._pending = deque()
		self._running = None

	@staticmethod
	def match_neighbor (description, name):
		for string in description:
			if re.search(r'(^|[\s])%s($|[\s,])' % re.escape(string), name) is None:
				return False
		return True

	def match_neighbors (self, descriptions):
		"""return the sublist of peers matching the description passed, or None if no description is given"""
		if not descriptions:
			return self.peers.keys()

		returned = []
		for key in self.peers:
			for description in descriptions:
				if Reactor.match_neighbor(description,key):
					if key not in returned:
						returned.append(key)
		return returned

	def nexthops (self, peers):
		return dict((peer,self.peers[peer].neighbor.local_address) for peer in peers)

	def plan (self, callback,name):
		self._pending.append((callback,name))