Ejemplo n.º 1
0
def bug_report (dtype, value, trace):
	"""Print a full crash report: panic banner, the traceback, the logger's
	configuration and history, then the footer banner."""
	def _flush ():
		# push anything buffered to the terminal around the report
		sys.stdout.flush()
		sys.stderr.flush()

	_flush()

	print(PANIC)
	_flush()

	import traceback

	print("-- Traceback\n\n")
	traceback.print_exception(dtype, value, trace)

	from exabgp.logger import Logger
	reporter = Logger()

	print("\n\n-- Configuration\n\n")
	print(reporter.config())
	print("\n\n-- Logging History\n\n")
	print(reporter.history())
	print("\n\n\n")

	print(FOOTER)
	_flush()
Ejemplo n.º 2
0
class ReceiveTimer (object):
	"""Track the BGP hold timer for one session and raise Notify when it
	expires, or when keepalives arrive although the holdtime is zero."""

	def __init__ (self, session, holdtime, code, subcode, message=''):
		self.logger = Logger()
		self.session = session

		self.holdtime = holdtime
		self.last_print = time.time()
		self.last_read = time.time()

		# Notify parameters raised when the hold timer expires
		self.code = code
		self.subcode = subcode
		self.message = message
		self.single = False

	def check_ka_timer (self, message=_NOP,ignore=_NOP.TYPE):
		# any real message (not the NOP placeholder) counts as liveness
		if message.TYPE != ignore:
			self.last_read = time.time()
		if self.holdtime:
			left = int(self.last_read  + self.holdtime - time.time())
			# only log when the displayed value changed, to avoid log spam
			if self.last_print != left:
				self.logger.debug('receive-timer %d second(s) left' % left,source='ka-'+self.session())
				self.last_print = left
			if left <= 0:
				raise Notify(self.code,self.subcode,self.message)
			# True for any non-keepalive message (holdtime is running)
			return message.TYPE != KeepAlive.TYPE
		return False

	def check_ka (self, message=_NOP,ignore=_NOP.TYPE):
		if self.check_ka_timer(message,ignore):
			return
		# reached when holdtime is zero or a keepalive arrived: one
		# keepalive is tolerated, the second raises a protocol error
		if self.single:
			raise Notify(2,6,'Negotiated holdtime was zero, it was invalid to send us a keepalive messages')
		self.single = True
Ejemplo n.º 3
0
class Handler (object):
	"""Dispatch text commands received from API processes to their callbacks."""

	# per-encoding registry of command name -> callback function
	callback = {
		'text': {},
		'json': {},
	}

	# need to sort and reverse, in order for the shorter command to not used by error
	# "show neighbor" should not match "show neighbors"
	functions = sorted([
		'withdraw watchdog',
		'withdraw vpls',
		'withdraw route',
		'withdraw flow',
		'withdraw attribute',
		'version',
		'teardown',
		'shutdown',
		'show routes extensive',
		'show routes',
		'show neighbors',
		'show neighbor',
		'restart',
		'reload',
		'flush route',
		'announce watchdog',
		'announce vpls',
		'announce route-refresh',
		'announce route',
		'announce operational',
		'announce flow',
		'announce eor',
		'announce attribute'
	],reverse=True)

	def __init__ (self):
		"""Wire every supported command name to its Command.Text callback."""
		self.logger = Logger()
		self.parser = Parser.Text()

		try:
			for name in self.functions:
				self.callback['text'][name] = Command.Text.callback[name]
		except KeyError:
			# a command listed in self.functions has no implementation
			raise RuntimeError('The code does not have an implementation for "%s", please code it !' % name)

	def text (self, reactor, service, command):
		"""Run the callback of the first registered name found in *command*.

		Returns True when a callback handled the command, False otherwise.
		"""
		for registered in self.functions:
			if registered in command:
				# NOTE(review): func_name is Python 2 only (__name__ on Python 3)
				self.logger.reactor("callback | handling '%s' with %s" % (command,self.callback['text'][registered].func_name),'warning')
				self.callback['text'][registered](self,reactor,service,command)
				return True
		self.logger.reactor("Command from process not understood : %s" % command,'warning')
		return False
Ejemplo n.º 4
0
class _Configuration(object):
    def __init__(self):
        self.processes = {}
        self.neighbors = {}
        self.logger = Logger()

    def inject_change(self, peers, change):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                if change.nlri.family() in self.neighbors[neighbor].families():
                    self.neighbors[neighbor].rib.outgoing.insert_announced(change)
                else:
                    self.logger.configuration("the route family is not configured on neighbor", "error")
                    result = False
        return result

    def inject_eor(self, peers, family):
        result = False
        for neighbor in self.neighbors:
            if neighbor in peers:
                result = True
                self.neighbors[neighbor].eor.append(family)
        return result

    def inject_operational(self, peers, operational):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                if operational.family() in self.neighbors[neighbor].families():
                    if operational.name == "ASM":
                        self.neighbors[neighbor].asm[operational.family()] = operational
                    self.neighbors[neighbor].messages.append(operational)
                else:
                    self.logger.configuration("the route family is not configured on neighbor", "error")
                    result = False
        return result

    def inject_refresh(self, peers, refresh):
        result = True
        for neighbor in self.neighbors:
            if neighbor in peers:
                family = (refresh.afi, refresh.safi)
                if family in self.neighbors[neighbor].families():
                    self.neighbors[neighbor].refresh.append(refresh.__class__(refresh.afi, refresh.safi))
                else:
                    result = False
        return result
Ejemplo n.º 5
0
	def __init__ (self, configurations):
		"""Build the reactor: load runtime settings and create the helper
		objects (logger, daemon, listener, API, ...) it coordinates."""
		# TCP bind addresses / port; _stopping makes the daemon exit after
		# the first session ('once' mode)
		self._ips = environment.settings().tcp.bind
		self._port = environment.settings().tcp.port
		self._stopping = environment.settings().tcp.once
		self.exit_code = self.Exit.unknown

		# loop pacing: sleep a fraction of the loop budget when busy-spinning
		self.max_loop_time = environment.settings().reactor.speed
		self._sleep_time = self.max_loop_time / 100
		self._busyspin = {}
		# drop root privileges before (True) or after starting the processes
		self.early_drop = environment.settings().daemon.drop

		self.processes = None

		self.configuration = Configuration(configurations)
		self.logger = Logger()
		self.asynchronous = ASYNC()
		self.signal = Signal()
		self.daemon = Daemon(self)
		self.listener = Listener(self)
		self.api = API(self)

		# peer name -> peer object
		self.peers = {}

		self._reload_processes = False
		self._saved_pid = False
Ejemplo n.º 6
0
	def __init__ (self, session, holdtime):
		"""Keepalive send-timer state for one BGP *session*.

		*holdtime* must expose a keepalive() method returning the keepalive
		interval in seconds (a value of zero disables keepalives).
		"""
		self.logger = Logger()
		self.session = session

		self.keepalive = holdtime.keepalive()
		# whole-second timestamps; last_print throttles log output
		self.last_print = int(time.time())
		self.last_sent = int(time.time())
Ejemplo n.º 7
0
	def __init__ (self, configurations):
		"""Build the reactor: read runtime settings, create the helper
		objects and install the signal handlers."""
		self.ip = environment.settings().tcp.bind
		self.port = environment.settings().tcp.port
		self.respawn = environment.settings().api.respawn

		self.max_loop_time = environment.settings().reactor.speed
		self.early_drop = environment.settings().daemon.drop

		self.logger = Logger()
		self.daemon = Daemon(self)
		self.processes = None
		self.listener = None
		self.configuration = Configuration(configurations)
		self.api = API(self)

		# peer name -> peer object
		self.peers = {}
		self.route_update = False

		# exit after the first session when tcp.once is set
		self._stopping = environment.settings().tcp.once
		self._shutdown = False
		self._reload = False
		self._reload_processes = False
		self._restart = False
		self._saved_pid = False
		self._pending = deque()
		self._running = None

		# the reactor reacts to TERM/HUP/ALRM/USR1/USR2
		signal.signal(signal.SIGTERM, self.sigterm)
		signal.signal(signal.SIGHUP, self.sighup)
		signal.signal(signal.SIGALRM, self.sigalrm)
		signal.signal(signal.SIGUSR1, self.sigusr1)
		signal.signal(signal.SIGUSR2, self.sigusr2)
Ejemplo n.º 8
0
	def __init__ (self,configuration):
		"""Build the reactor: read runtime settings, create the helpers and
		install the signal handlers."""
		self.ip = environment.settings().tcp.bind
		self.port = environment.settings().tcp.port

		self.max_loop_time = environment.settings().reactor.speed
		self.half_loop_time = self.max_loop_time / 2

		self.logger = Logger()
		self.daemon = Daemon(self)
		self.processes = None
		self.listener = None
		self.configuration = Configuration(configuration)

		# peer name -> peer object
		self._peers = {}
		self._shutdown = False
		self._reload = False
		self._reload_processes = False
		self._restart = False
		self._route_update = False
		self._saved_pid = False
		self._commands = []
		self._pending = []

		# the reactor reacts to TERM/HUP/ALRM/USR1/USR2
		signal.signal(signal.SIGTERM, self.sigterm)
		signal.signal(signal.SIGHUP, self.sighup)
		signal.signal(signal.SIGALRM, self.sigalrm)
		signal.signal(signal.SIGUSR1, self.sigusr1)
		signal.signal(signal.SIGUSR2, self.sigusr2)
Ejemplo n.º 9
0
	def __init__ (self, reactor):
		"""Reset process state and read the API 'highres' setting."""
		self.logger = Logger()
		self.reactor = reactor
		self.clean()
		self.silence = False

		# local import - presumably to avoid a circular dependency at module
		# load time; confirm against the package layout
		from exabgp.configuration.environment import environment
		self.highres = environment.settings().api.highres
Ejemplo n.º 10
0
	def bug_report (type, value, trace):
		"""Print a ready-to-paste bug report: version information, the
		logger's configuration and history, and the traceback (Python 2)."""
		import traceback
		from exabgp.logger import Logger
		logger = Logger()

		print
		print
		print "-"*80
		print "-- Please provide the information below on :"
		print "-- https://github.com/Exa-Networks/exabgp/issues"
		print "-"*80
		print
		print
		print '-- Information'
		print
		print
		print 'ExaBGP : %s' % version
		print 'Python : %s' % sys.version.replace('\n',' ')
		print 'Uname  : %s' % platform.version()
		# sys.maxint exists on Python 2 only
		print 'MaxInt : %s' % str(sys.maxint)
		print
		print
		print "-- Configuration"
		print
		print
		print logger.config()
		print
		print
		print "-- Logging History"
		print
		print
		print logger.history()
		print
		print
		print "-- Traceback"
		print
		print
		traceback.print_exception(type,value,trace)
		print
		print
		print "-"*80
		print "-- Please provide the information above on :"
		print "-- https://github.com/Exa-Networks/exabgp/issues"
		print "-"*80
		print
		print
Ejemplo n.º 11
0
def UpdateFactory (negotiated,data):
	"""Parse the body of a BGP UPDATE message and return an Update object
	holding the decoded NLRIs and path attributes."""
	logger = Logger()

	length = len(data)

	# withdrawn routes section: 2-byte length followed by the prefixes
	lw,withdrawn,data = defix(data)

	if len(withdrawn) != lw:
		raise Notify(3,1,'invalid withdrawn routes length, not enough data available')

	# path attributes section: 2-byte length followed by the attributes;
	# everything after is the announced NLRI
	la,attribute,announced = defix(data)

	if len(attribute) != la:
		raise Notify(3,1,'invalid total path attribute length, not enough data available')

	# the two 2-byte length fields plus all three sections must account
	# for the whole message body
	if 2 + lw + 2+ la + len(announced) != length:
		raise Notify(3,1,'error in BGP message length, not enough data for the size announced')

	attributes = AttributesFactory(NLRIFactory,negotiated,attribute)

	# Is the peer going to send us some Path Information with the route (AddPath)
	addpath = negotiated.addpath.receive(AFI(AFI.ipv4),SAFI(SAFI.unicast))
	nho = attributes.get(AID.NEXT_HOP,None)
	nh = nho.packed if nho else None

	if not withdrawn:
		logger.parser(LazyFormat("parsed no withdraw nlri",od,''))

	nlris = []
	while withdrawn:
		# consume one encoded prefix at a time
		length,nlri = NLRIFactory(AFI.ipv4,SAFI.unicast_multicast,withdrawn,addpath,nh,IN.withdrawn)
		logger.parser(LazyFormat("parsed withdraw nlri %s payload " % nlri,od,withdrawn[:len(nlri)]))
		withdrawn = withdrawn[length:]
		nlris.append(nlri)

	if not announced:
		logger.parser(LazyFormat("parsed no announced nlri",od,''))

	while announced:
		length,nlri = NLRIFactory(AFI.ipv4,SAFI.unicast_multicast,announced,addpath,nh,IN.announced)
		logger.parser(LazyFormat("parsed announce nlri %s payload " % nlri,od,announced[:len(nlri)]))
		announced = announced[length:]
		nlris.append(nlri)

	# multiprotocol NLRIs were decoded along with the path attributes
	for nlri in attributes.mp_withdraw:
		nlris.append(nlri)

	for nlri in attributes.mp_announce:
		nlris.append(nlri)

	return Update(nlris,attributes)
Ejemplo n.º 12
0
class Daemon (object):
	"""Daemon housekeeping: working directory, umask and PID file handling.

	Fixes over the previous version: two lines were space-indented inside a
	tab-indented class (a TabError on Python 3) and removepid used the
	Python 2 only 'except E,v' syntax.
	"""

	def __init__ (self, reactor):
		self.pid = environment.settings().daemon.pid
		self.user = environment.settings().daemon.user
		self.daemonize = environment.settings().daemon.daemonize
		self.umask = environment.settings().daemon.umask

		self.logger = Logger()

		self.reactor = reactor

		# do not pin a (possibly removable) working directory
		os.chdir('/')
		os.umask(self.umask)

	def savepid (self):
		"""Write our PID to the configured PID file.

		Returns True on success (or when no PID file is configured),
		False when the file already exists or can not be written.
		"""
		self._saved_pid = False

		if not self.pid:
			return True

		ownid = os.getpid()

		# exclusive creation so a running/stale instance is not clobbered
		flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
		# 0644: read/write for owner, read-only for group and others
		mode = ((os.R_OK | os.W_OK) << 6) | (os.R_OK << 3) | os.R_OK

		try:
			fd = os.open(self.pid,flags,mode)
		except OSError:
			self.logger.daemon("PIDfile already exists, not updated %s" % self.pid)
			return False

		try:
			f = os.fdopen(fd,'w')
			line = "%d\n" % ownid
			f.write(line)
			f.close()
			self._saved_pid = True
		except IOError:
			self.logger.daemon("Can not create PIDfile %s" % self.pid,'warning')
			return False
		self.logger.daemon("Created PIDfile %s with value %d" % (self.pid,ownid),'warning')
		return True

	def removepid (self):
		"""Delete the PID file we created; a missing file is not an error."""
		if not self.pid or not self._saved_pid:
			return
		try:
			os.remove(self.pid)
		except OSError as exc:
			if exc.errno == errno.ENOENT:
				pass
			else:
				self.logger.daemon("Can not remove PIDfile %s" % self.pid,'error')
				return
		self.logger.daemon("Removed PIDfile %s" % self.pid)
Ejemplo n.º 13
0
def ready (io):
	"""Generator polling a connecting socket *io* until the connect completes.

	Yields False while the connection is still in progress, then a final
	True on success; ends without yielding True on definitive failure.
	"""
	logger = Logger()
	warned = False
	start = time.time()

	while True:
		try:
			_,w,_ = select.select([],[io,],[],0)
			if not w:
				# still connecting; warn once after more than a second
				if not warned and time.time()-start > 1.0:
					logger.debug('attempting to establish connection','network')
					warned = True
				yield False
				continue
			# writable: read the actual connect(2) result from the socket
			err = io.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
			if not err:
				if warned:
					logger.warning('connection established','network')
				yield True
				return
			elif err in error.block:
				# transient error: keep retrying
				logger.warning('connect attempt failed, retrying, reason %s' % errno.errorcode[err],'network')
				yield False
			else:
				# permanent failure
				yield False
				return
		except select.error:
			yield False
			return
Ejemplo n.º 14
0
def bug_report (dtype, value, trace):
	"""Emit a crash report on stdout: panic banner, the traceback, the
	logger's configuration and history, then the footer banner."""
	import traceback

	print(panic)

	print("-- Traceback\n\n")
	traceback.print_exception(dtype, value, trace)

	from exabgp.logger import Logger
	reporter = Logger()

	print("\n\n-- Configuration\n\n")
	print(reporter.config())
	print("\n\n-- Logging History\n\n")
	print(reporter.history())
	print("\n\n\n")

	print(footer)
Ejemplo n.º 15
0
def bug_report (type, value, trace):
	"""Print a crash report: panic banner, the traceback, the logger's
	configuration and history, then the footer (Python 2 print syntax)."""
	print panic

	import traceback

	print "-- Traceback\n\n"
	traceback.print_exception(type,value,trace)

	from exabgp.logger import Logger
	logger = Logger()

	print "\n\n-- Configuration\n\n"
	print logger.config()
	print "\n\n-- Logging History\n\n"
	print logger.history()
	print "\n\n\n"

	print footer
Ejemplo n.º 16
0
	def __init__ (self):
		"""Wire each supported command name to its text-mode callback."""
		self.logger = Logger()
		self.parser = Parser.Text()

		try:
			for name in self.functions:
				self.callback['text'][name] = Command.Text.callback[name]
		except KeyError:
			# a command listed in self.functions has no implementation
			raise RuntimeError('The code does not have an implementation for "%s", please code it !' % name)
Ejemplo n.º 17
0
	def __init__ (self, reactor, backlog=200):
		"""Listener state; *backlog* is presumably the socket.listen() queue
		length used when the sockets are created - confirm where they open."""
		self.serving = False
		self.logger = Logger()

		self._reactor = reactor
		self._backlog = backlog
		self._sockets = {}
		self._accepted = {}
		self._pending = 0
Ejemplo n.º 18
0
Archivo: debug.py Proyecto: asnd/exabgp
	def bug_report (type, value, trace):
		"""Print a ready-to-paste bug report: Python version, the logger's
		configuration and history, and the traceback (Python 2)."""
		import traceback
		from exabgp.logger import Logger
		logger = Logger()

		print
		print
		print "-"*80
		print "-- Please provide the information below on :"
		print "-- https://github.com/Exa-Networks/exabgp/issues"
		print "-"*80
		print
		print
		print '-- Version'
		print
		print
		print sys.version
		print
		print
		print "-- Configuration"
		print
		print
		print logger.config()
		print
		print
		print "-- Logging History"
		print
		print
		print logger.history()
		print
		print
		print "-- Traceback"
		print
		print
		traceback.print_exception(type,value,trace)
		print
		print
		print "-"*80
		print "-- Please provide the information above on :"
		print "-- https://github.com/Exa-Networks/exabgp/issues"
		print "-"*80
		print
		print
Ejemplo n.º 19
0
	def __init__ (self):
		"""Reset helper-process state and read the API settings."""
		self.logger = Logger()
		self.clean()
		self.silence = False
		self._buffer = {}
		self._configuration = {}

		# allow 5 restarts of a crashed helper when api.respawn is set
		self.respawn_number = 5 if environment.settings().api.respawn else 0
		self.terminate_on_error = environment.settings().api.terminate
		self.ack = environment.settings().api.ack
Ejemplo n.º 20
0
	def __init__ (self,me,holdtime,code,subcode,message=''):
		"""Hold-timer state; code/subcode/message are the Notify parameters
		raised when the timer expires."""
		self.logger = Logger()
		self.me = me

		self.holdtime = holdtime
		self.last_read = time.time()

		self.code = code
		self.subcode = subcode
		self.message = message
Ejemplo n.º 21
0
	def parse (self, data, negotiated):
		"""Recursively parse the BGP path attributes in *data*, adding each
		decoded attribute to self, and return self."""
		if not data:
			return self

		# We do not care if the attribute are transitive or not as we do not redistribute
		flag = Attribute.Flag(ord(data[0]))
		aid = Attribute.CODE(ord(data[1]))

		# extended-length attributes carry a two byte length field
		if flag & Attribute.Flag.EXTENDED_LENGTH:
			length = unpack('!H',data[2:4])[0]
			offset = 4
		else:
			length = ord(data[2])
			offset = 3

		# split the buffer: this attribute's payload and the remainder
		data = data[offset:]
		left = data[length:]
		attribute = data[:length]

		logger = Logger()
		logger.parser(LazyFormat("parsing flag %x type %02x (%s) len %02x %s" % (flag,int(aid),aid,length,'payload ' if length else ''),data[:length]))

		# remove the PARTIAL bit before comparaison if the attribute is optional
		if aid in Attribute.attributes_optional:
			flag &= Attribute.Flag.MASK_PARTIAL & 0xFF
			# flag &= ~Attribute.Flag.PARTIAL & 0xFF  # cleaner than above (python use signed integer for ~)

		# handle the attribute if we know it
		if Attribute.registered(aid,flag):
			self.add(Attribute.unpack(aid,flag,attribute,negotiated))
			return self.parse(left,negotiated)
		# XXX: FIXME: we could use a fallback function here like capability

		# if we know the attribute but the flag is not what the RFC says. ignore it.
		if aid in Attribute.attributes_known:
			logger.parser('invalid flag for attribute %s (flag 0x%02X, aid 0x%02X)' % (Attribute.CODE.names.get(aid,'unset'),flag,aid))
			return self.parse(left,negotiated)

		# it is an unknown transitive attribute we need to pass on
		if flag & Attribute.Flag.TRANSITIVE:
			logger.parser('unknown transitive attribute (flag 0x%02X, aid 0x%02X)' % (flag,aid))
			self.add(GenericAttribute(aid,flag | Attribute.Flag.PARTIAL,attribute),attribute)
			return self.parse(left,negotiated)

		# it is an unknown non-transitive attribute we can ignore.
		logger.parser('ignoring unknown non-transitive attribute (flag 0x%02X, aid 0x%02X)' % (flag,aid))
		return self.parse(left,negotiated)
Ejemplo n.º 22
0
	def __init__ (self, reactor):
		"""Read the daemon settings, move to / and apply the configured umask."""
		self.pid = environment.settings().daemon.pid
		self.user = environment.settings().daemon.user
		self.daemonize = environment.settings().daemon.daemonize
		self.umask = environment.settings().daemon.umask

		self.logger = Logger()

		self.reactor = reactor

		# chdir to / so the daemon does not pin a removable mount point
		os.chdir('/')
		os.umask(self.umask)
Ejemplo n.º 23
0
	def __init__ (self, session, holdtime, code, subcode, message=''):
		"""Receive (hold) timer state for *session*; code/subcode/message
		are the Notify parameters raised when the timer expires."""
		self.logger = Logger()
		self.session = session

		self.holdtime = holdtime
		self.last_print = time.time()
		self.last_read = time.time()

		self.code = code
		self.subcode = subcode
		self.message = message
		# presumably tracks whether one keepalive was already tolerated
		# when holdtime is zero - confirm in the check_ka caller
		self.single = False
Ejemplo n.º 24
0
class Decoder (object):
	"""Dispatch text commands received from helper processes to the
	handlers recorded via register_command."""

	# shared command-name -> handler registry
	storage = {}

	def __init__ (self):
		self.logger = Logger()
		self.format = Text()

	# callaback code

	@classmethod
	def register_command (cls, command, function):
		"""Record *function* as the handler for *command*; usable as a decorator."""
		cls.storage[command] = function
		return function

	def parse_command (self, reactor, service, command):
		"""Run the handler of the registered name contained in *command*."""
		# longest names first so 'show neighbor' does not shadow 'show neighbors'
		for known in sorted(self.storage, reverse=True):
			if known not in command:
				continue
			return self.storage[known](self, reactor, service, command)
		self.logger.reactor("Command from process not understood : %s" % command,'warning')
		return False
Ejemplo n.º 25
0
class ReceiveTimer(object):
    """Track the BGP hold timer and raise Notify when it expires, or when
    a keepalive arrives although the negotiated holdtime is zero."""

    def __init__(self, me, holdtime, code, subcode, message=""):
        self.logger = Logger()
        self.me = me

        self.holdtime = holdtime
        self.last_read = time.time()

        # Notify parameters raised when the hold timer expires
        self.code = code
        self.subcode = subcode
        self.message = message

    def check_ka(self, message=_NOP, ignore=_NOP.TYPE):
        """Refresh the timer on any real message and enforce expiry."""
        if message.TYPE != ignore:
            self.last_read = time.time()

        if not self.holdtime:
            # a holdtime of zero means the peer must not send keepalives
            if message.TYPE == KeepAlive.TYPE:
                raise Notify(2, 6, "Negotiated holdtime was zero, it was invalid to send us a keepalive messages")
            return None

        remaining = int(self.last_read + self.holdtime - time.time())
        self.logger.timers(self.me("Receive Timer %d second(s) left" % remaining))
        if remaining <= 0:
            raise Notify(self.code, self.subcode, self.message)
Ejemplo n.º 26
0
class SendTimer (object):
	"""Decide when a keepalive must be sent on a session."""

	def __init__ (self,me,holdtime):
		self.logger = Logger()
		self.me = me

		# keepalive interval in seconds; zero disables keepalives entirely
		self.keepalive = holdtime.keepalive()
		self.last_sent = int(time.time())

	def need_ka (self):
		"""Return True when the keepalive interval elapsed since the last send."""
		if not self.keepalive:
			return False

		current = int(time.time())
		remaining = self.last_sent + self.keepalive - current

		# skip the log line within the second a keepalive was just sent
		if current != self.last_sent:
			self.logger.timers(self.me('Send Timer %d second(s) left' % remaining))

		if remaining > 0:
			return False
		self.last_sent = current
		return True
Ejemplo n.º 27
0
class Timer (object):
	"""Combined receive (hold) and send (keepalive) timer for a session;
	raises Notify when the peer lets the hold timer expire."""

	def __init__ (self,me,holdtime,code,subcode,message=''):
		self.logger = Logger()

		self.me = me

		# Notify parameters raised when the hold timer expires
		self.code = code
		self.subcode = subcode
		self.message = message

		self.holdtime = holdtime
		self.last_read = time.time()
		self.last_sent = time.time()

	def tick (self,message=_NOP,ignore=_NOP.TYPE):
		# any message other than the NOP placeholder refreshes the hold timer
		if message.TYPE != ignore:
			self.last_read = time.time()
		if self.holdtime:
			left = int(self.last_read  + self.holdtime - time.time())
			self.logger.timers(self.me('Receive Timer %d second(s) left' % left))
			if left <= 0:
				raise Notify(self.code,self.subcode,self.message)
		elif message.TYPE == KeepAlive.TYPE:
			# holdtime of zero: keepalives must not be sent at all
			raise Notify(2,6,'Holdtime is zero and we got a keepalive message')

	def keepalive (self):
		"""Return True when a keepalive must be sent (and reset the send timer)."""
		if not self.holdtime:
			return False

		left = int(self.last_sent + self.holdtime.keepalive() - time.time())
		self.logger.timers(self.me('Sending Timer %d second(s) left' % left))

		if left <= 0:
			self.last_sent = time.time()
			return True
		return False
Ejemplo n.º 28
0
class SendTimer (object):
	"""Decide when a keepalive must be sent on a BGP session."""

	def __init__ (self, session, holdtime):
		self.logger = Logger()
		self.session = session

		# keepalive interval in seconds; zero disables keepalives entirely
		self.keepalive = holdtime.keepalive()
		self.last_print = int(time.time())
		self.last_sent = int(time.time())

	def need_ka (self):
		"""Return True when the keepalive interval elapsed since the last send."""
		if not self.keepalive:
			return False

		current = int(time.time())
		remaining = self.last_sent + self.keepalive - current

		# log at most once per second
		if current != self.last_print:
			self.logger.debug('send-timer %d second(s) left' % remaining,source='ka-'+self.session())
			self.last_print = current

		if remaining > 0:
			return False
		self.last_sent = current
		return True
Ejemplo n.º 29
0
class ASYNC (object):
	"""Run scheduled generator callbacks a step at a time from the reactor loop."""

	def __init__ (self):
		self.logger = Logger()
		# holds at most one "generation": a list of (uid, generator) pairs
		self._async = []

	def ready (self):
		# True when there is at least one pending callback
		return len(self._async) > 0

	def schedule (self, uid, command, callback):
		self.logger.debug('async | %s | %s' % (uid,command),'reactor')
		if self._async:
			self._async[0].append((uid,callback))
		else:
			self._async.append([(uid,callback),])

	def clear (self, deluid=None):
		"""Drop all pending callbacks, or only those registered for *deluid*."""
		if not self._async:
			return
		if deluid is None:
			self._async = []
			return
		running = []
		for (uid,generator) in self._async[0]:
			if uid != deluid:
				running.append((uid,generator))
		self._async.pop()
		if running:
			self._async.append(running)

	def run (self):
		"""Advance every scheduled generator once; False when none are pending."""
		if not self._async:
			return False
		running = []

		for (uid,generator) in self._async[0]:
			try:
				# NOTE(review): the generator is deliberately stepped twice
				# per pass here - confirm the intent before changing it
				six.next(generator)
				six.next(generator)
				running.append((uid,generator))
			except StopIteration:
				# the callback completed; do not reschedule it
				pass
			except KeyboardInterrupt:
				raise
			except Exception as exc:
				self.logger.error('async | %s | problem with function' % uid,'reactor')
				for line in str(exc).split('\n'):
					self.logger.error('async | %s | %s' % (uid,line),'reactor')
		self._async.pop()
		if running:
			self._async.append(running)
		return True
Ejemplo n.º 30
0
class ASYNC (object):
	"""Run scheduled generator callbacks, a bounded number of steps per call."""

	# maximum number of generator steps advanced per run() invocation
	LIMIT = 500

	def __init__ (self):
		self.logger = Logger()
		# FIFO of (uid, generator) pairs still to be run
		self._async = deque()

	def ready (self):
		# True when at least one callback is pending
		return len(self._async) > 0

	def schedule (self, uid, command, callback):
		self.logger.debug('async | %s | %s' % (uid,command),'reactor')
		self._async.append((uid,callback))

	def clear (self, deluid=None):
		"""Drop all pending callbacks, or only those registered for *deluid*."""
		if not self._async:
			return
		if deluid is None:
			# We could delete all the generators just to be safe
			self._async = deque()
			return
		running = deque()
		for (uid,generator) in self._async:
			if uid != deluid:
				running.append((uid,generator))
		self._async = running

	def run (self):
		"""Advance pending generators by up to LIMIT steps; False when idle."""
		if not self.ready():
			return False

		length = range(min(len(self._async),self.LIMIT))
		uid, generator = self._async.popleft()

		for _ in length:
			try:
				# NOTE(review): the generator is deliberately stepped twice
				# per iteration here - confirm the intent before changing it
				six.next(generator)
				six.next(generator)
			except StopIteration:
				# finished: move on to the next pending generator, if any
				if not self._async:
					return False
				uid, generator = self._async.popleft()
			except KeyboardInterrupt:
				raise
			except Exception as exc:
				self.logger.error('async | %s | problem with function' % uid,'reactor')
				for line in str(exc).split('\n'):
					self.logger.error('async | %s | %s' % (uid,line),'reactor')

		# put the generator currently in flight back at the front
		self._async.appendleft((uid, generator))
		return True
Ejemplo n.º 31
0
 def __init__(self):
     """Reset signal state: nothing received yet, counter zeroed."""
     self.logger = Logger()
     self.received = self.NONE
     self.number = 0
     # rearm() presumably (re)installs the signal handlers - confirm
     self.rearm()
Ejemplo n.º 32
0
class Reactor(object):
    class Exit(object):
        # process exit codes: 0 means success, 1 an error of the named kind
        normal = 0
        validate = 0
        listening = 1
        configuration = 1
        privileges = 1
        log = 1
        pid = 1
        socket = 1
        io_error = 1
        process = 1
        select = 1
        unknown = 1

    # [hex(ord(c)) for c in os.popen('clear').read()]
    # ANSI escape sequence that clears the terminal (cursor home + erase)
    clear = concat_bytes_i(
        character(int(c, 16))
        for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a'])

    def __init__(self, configurations):
        """Build the reactor: read runtime settings and create the helper
        objects (logger, daemon, listener, API, ...) it coordinates."""
        # TCP bind addresses / port; _stopping = exit after the first session
        self._ips = environment.settings().tcp.bind
        self._port = environment.settings().tcp.port
        self._stopping = environment.settings().tcp.once
        self.exit_code = self.Exit.unknown

        # loop pacing plus busy-spin and per-peer rate-limit bookkeeping
        self.max_loop_time = environment.settings().reactor.speed
        self._sleep_time = self.max_loop_time / 100
        self._busyspin = {}
        self._ratelimit = {}
        self.early_drop = environment.settings().daemon.drop

        self.processes = None

        self.configuration = Configuration(configurations)
        self.logger = Logger()
        self.asynchronous = ASYNC()
        self.signal = Signal()
        self.daemon = Daemon(self)
        self.listener = Listener(self)
        self.api = API(self)

        # peer name -> peer object
        self._peers = {}

        self._reload_processes = False
        self._saved_pid = False
        self._poller = select.poll()

    def _termination(self, reason, exit_code):
        """Record *exit_code*, flag shutdown via the signal object, log *reason*."""
        self.exit_code = exit_code
        self.signal.received = Signal.SHUTDOWN
        self.logger.critical(reason, 'reactor')

    def _prevent_spin(self):
        """Count loop iterations within the current second; once the budget
        (max_loop_time) is exceeded, sleep briefly and return True."""
        second = int(time.time())
        if second not in self._busyspin:
            # a new second started: discard older counters
            self._busyspin = {second: 0}
        self._busyspin[second] += 1
        if self._busyspin[second] <= self.max_loop_time:
            return False
        time.sleep(self._sleep_time)
        return True

    def _rate_limited(self, peer, rate):
        """Return True when *peer* used up its *rate* budget for this second.

        A rate of zero or less disables limiting entirely.
        """
        if rate <= 0:
            return False
        second = int(time.time())
        budget = self._ratelimit.get(peer, {})
        if second not in budget:
            # new second (or new peer): reset the budget, spending one slot
            self._ratelimit[peer] = {second: rate - 1}
            return False
        if budget[second] > 0:
            budget[second] -= 1
            return False
        return True

    def _wait_for_io(self, sleeptime):
        """Poll the registered descriptors for up to *sleeptime*; yield each
        fd that became readable. Flags termination on hang-up or ^C."""
        try:
            for fd, event in self._poller.poll(sleeptime):
                if event & select.POLLIN or event & select.POLLPRI:
                    # data (or priority data) available to read
                    yield fd
                    continue
                elif event & select.POLLHUP:
                    self._termination('^C received', self.Exit.normal)
                    return
                elif event & select.POLLERR or event & select.POLLNVAL:
                    # error or invalid fd: back off rather than spin
                    self._prevent_spin()
                    continue
        except KeyboardInterrupt:
            self._termination('^C received', self.Exit.normal)
            return
        except Exception:
            self._prevent_spin()
            return

    # peer related functions

    def active_peers(self):
        """Return the keys of peers that are non-passive or have a protocol."""
        return set(
            key
            for key, peer in self._peers.items()
            if not peer.neighbor.passive or peer.proto
        )

    def established_peers(self):
        """Return the keys of peers whose FSM reached ESTABLISHED."""
        return set(
            key
            for key, peer in self._peers.items()
            if peer.fsm == FSM.ESTABLISHED
        )

    def peers(self):
        """Return the names of all configured peers."""
        return list(self._peers)

    def handle_connection(self, peer_name, connection):
        """Hand an incoming *connection* to the peer it belongs to."""
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.handle_connection(connection)

    def neighbor(self, peer_name):
        """Return the neighbor object of *peer_name*, or None if unknown."""
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        return peer.neighbor

    def neighbor_name(self, peer_name):
        """Return the neighbor name of *peer_name*, or "" if unknown."""
        peer = self._peers.get(peer_name, None)
        if peer:
            return peer.neighbor.name()
        self.logger.critical('could not find referenced peer', 'reactor')
        return ""

    def neighbor_ip(self, peer_name):
        """Return the neighbor's peer address as a string, or "" if unknown."""
        peer = self._peers.get(peer_name, None)
        if peer:
            return str(peer.neighbor.peer_address)
        self.logger.critical('could not find referenced peer', 'reactor')
        return ""

    def neighbor_cli_data(self, peer_name):
        """Return the CLI representation of a peer, or "" if unknown."""
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return ""
        return peer.cli_data()

    def neighor_rib(self, peer_name, rib_name, advertised=False):
        """Return the cached changes of one RIB ('out' or incoming) of a peer.

        NOTE(review): the method name is missing a 'b' (neighor) - kept
        as-is since callers reference it by this name.
        """
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return []
        families = None
        if advertised:
            # restrict to the families actually negotiated with the peer
            families = peer.proto.negotiated.families if peer.proto else []
        rib = peer.neighbor.rib.outgoing if rib_name == 'out' else peer.neighbor.rib.incoming
        return list(rib.cached_changes(families))

    def neighbor_rib_resend(self, peer_name):
        """Ask a peer's outgoing RIB to resend its routes."""
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.neighbor.rib.outgoing.resend(None, peer.neighbor.route_refresh)

    def neighbor_rib_out_withdraw(self, peer_name):
        """Ask a peer's outgoing RIB to withdraw its routes."""
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.neighbor.rib.outgoing.withdraw(None, peer.neighbor.route_refresh)

    def neighbor_rib_in_clear(self, peer_name):
        """Clear a peer's incoming RIB."""
        peer = self._peers.get(peer_name, None)
        if not peer:
            self.logger.critical('could not find referenced peer', 'reactor')
            return
        peer.neighbor.rib.incoming.clear()

    # ...

    def _completed(self, peers):
        """True when none of the listed peers still has outgoing updates pending."""
        return all(
            not self._peers[peer].neighbor.rib.outgoing.pending()
            for peer in peers
        )

    def run(self, validate, root):
        """Main reactor loop: daemonise, bind, load config and drive the peers.

        Returns one of the ``self.Exit`` codes.  When *validate* is true the
        configuration is only parsed and printed, never run.
        NOTE(review): *root* is accepted but never used in this body — confirm
        whether callers still need it.
        """
        self.daemon.daemonise()

        # Make sure we create processes once we have closed file descriptor
        # unfortunately, this must be done before reading the configuration file
        # so we can not do it with dropped privileges
        self.processes = Processes()

        # we have to read the configuration possibly with root privileges
        # as we need the MD5 information when we bind, and root is needed
        # to bind to a port < 1024

        # this is undesirable as :
        # - handling user generated data as root should be avoided
        # - we may not be able to reload the configuration once the privileges are dropped

        # but I can not see any way to avoid it
        for ip in self._ips:
            if not self.listener.listen_on(ip, None, self._port, None, False,
                                           None):
                return self.Exit.listening

        if not self.load():
            return self.Exit.configuration

        if validate:  # only validate configuration
            self.logger.warning('', 'configuration')
            self.logger.warning('parsed Neighbors, un-templated',
                                'configuration')
            self.logger.warning('------------------------------',
                                'configuration')
            self.logger.warning('', 'configuration')
            for key in self._peers:
                self.logger.warning(str(self._peers[key].neighbor),
                                    'configuration')
                self.logger.warning('', 'configuration')
            return self.Exit.validate

        # per-neighbor listening sockets (with the neighbor's MD5/TTL options)
        for neighbor in self.configuration.neighbors.values():
            if neighbor.listen:
                if not self.listener.listen_on(
                        neighbor.md5_ip, neighbor.peer_address,
                        neighbor.listen, neighbor.md5_password,
                        neighbor.md5_base64, neighbor.ttl_in):
                    return self.Exit.listening

        if not self.early_drop:
            self.processes.start(self.configuration.processes)

        if not self.daemon.drop_privileges():
            self.logger.critical(
                'could not drop privileges to \'%s\' refusing to run as root' %
                self.daemon.user, 'reactor')
            self.logger.critical(
                'set the environmemnt value exabgp.daemon.user to change the unprivileged user',
                'reactor')
            return self.Exit.privileges

        if self.early_drop:
            self.processes.start(self.configuration.processes)

        # This is required to make sure we can write in the log location as we now have dropped root privileges
        if not self.logger.restart():
            self.logger.critical('could not setup the logger, aborting',
                                 'reactor')
            return self.Exit.log

        if not self.daemon.savepid():
            return self.Exit.pid

        # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
        reload_completed = False

        # optional delayed start, aligned on a multiple of `wait` minutes
        wait = environment.settings().tcp.delay
        if wait:
            sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
            self.logger.debug(
                'waiting for %d seconds before connecting' % sleeptime,
                'reactor')
            time.sleep(float(sleeptime))

        workers = {}     # file descriptor -> peer key, for poll() wakeups
        peers = set()    # peer keys to give a turn to this cycle
        api_fds = []     # api process fds currently registered with the poller

        while True:
            try:
                # forward any received signal to the API processes that asked for it
                if self.signal.received:
                    for key in self._peers:
                        if self._peers[key].neighbor.api['signal']:
                            self._peers[key].reactor.processes.signal(
                                self._peers[key].neighbor, self.signal.number)

                    signaled = self.signal.received
                    self.signal.rearm()

                    if signaled == Signal.SHUTDOWN:
                        self.shutdown()
                        break

                    if signaled == Signal.RESTART:
                        self.restart()
                        continue

                    # defer reload handling until the previous reload finished
                    if not reload_completed:
                        continue

                    if signaled == Signal.FULL_RELOAD:
                        self._reload_processes = True

                    if signaled in (Signal.RELOAD, Signal.FULL_RELOAD):
                        self.load()
                        self.processes.start(self.configuration.processes,
                                             self._reload_processes)
                        self._reload_processes = False
                        continue

                if self.listener.incoming():
                    # check all incoming connection
                    self.asynchronous.schedule(
                        str(uuid.uuid1()), 'checking for new connection(s)',
                        self.listener.new_connections())

                peers = self.active_peers()
                if self._completed(peers):
                    reload_completed = True

                sleep = self._sleep_time

                # do not attempt to listen on closed sockets even if the peer is still here
                for io in list(workers.keys()):
                    if io == -1:
                        self._poller.unregister(io)
                        del workers[io]

                # give a turn to all the peers
                for key in list(peers):
                    peer = self._peers[key]

                    # limit the number of message handling per second
                    if self._rate_limited(key, peer.neighbor.rate_limit):
                        peers.discard(key)
                        continue

                    # handle the peer
                    action = peer.run()

                    # .run() returns an ACTION enum:
                    # * immediate if it wants to be called again
                    # * later if it should be called again but has no work atm
                    # * close if it is finished and is closing down, or restarting
                    if action == ACTION.CLOSE:
                        if key in self._peers:
                            del self._peers[key]
                        peers.discard(key)
                    # we are losing this peer, no point scheduling more work for it
                    elif action == ACTION.LATER:
                        io = peer.socket()
                        if io != -1:
                            self._poller.register(
                                io,
                                select.POLLIN | select.POLLPRI | select.POLLHUP
                                | select.POLLNVAL | select.POLLERR)
                            workers[io] = key
                        # no need to come back to it before a full cycle
                        peers.discard(key)
                    elif action == ACTION.NOW:
                        sleep = 0

                    if not peers:
                        break

                # read at least one message per process if there is some and parse it
                for service, command in self.processes.received():
                    self.api.text(self, service, command)
                    sleep = 0

                self.asynchronous.run()

                # keep the poller registration in sync with the current API process fds
                if api_fds != self.processes.fds:
                    for fd in api_fds:
                        if fd == -1:
                            continue
                        if fd not in self.processes.fds:
                            self._poller.unregister(fd)
                    for fd in self.processes.fds:
                        if fd == -1:
                            continue
                        if fd not in api_fds:
                            self._poller.register(
                                fd,
                                select.POLLIN | select.POLLPRI | select.POLLHUP
                                | select.POLLNVAL | select.POLLERR)
                    api_fds = self.processes.fds

                # wake up the peers whose sockets became readable
                for io in self._wait_for_io(sleep):
                    if io not in api_fds:
                        peers.add(workers[io])

                if self._stopping and not self._peers.keys():
                    self._termination('exiting on peer termination',
                                      self.Exit.normal)

            except KeyboardInterrupt:
                self._termination('^C received', self.Exit.normal)
            except SystemExit:
                self._termination('exiting', self.Exit.normal)
            # socket.error is a subclass of IOError (so catch it first)
            except socket.error:
                self._termination('socket error received', self.Exit.socket)
            except IOError:
                self._termination(
                    'I/O Error received, most likely ^C during IO',
                    self.Exit.io_error)
            except ProcessError:
                self._termination(
                    'Problem when sending message(s) to helper program, stopping',
                    self.Exit.process)
            except select.error:
                self._termination('problem using select, stopping',
                                  self.Exit.select)

        return self.exit_code

    def register_peer(self, name, peer):
        """Record *peer* under *name* so the reactor loop will drive it."""
        self._peers[name] = peer

    def teardown_peer(self, name, code):
        """Ask the peer registered under *name* to tear its session down with *code*."""
        self._peers[name].teardown(code)

    def shutdown(self):
        """Terminate all the current BGP connections

        Stops the listener, shuts every peer down, drops queued asynchronous
        work, terminates the helper processes and removes the pid file, then
        flags the reactor as stopping so run() can exit once the peers are gone.
        """
        self.logger.critical('performing shutdown', 'reactor')
        if self.listener:
            self.listener.stop()
            self.listener = None
        for key in self._peers.keys():
            self._peers[key].shutdown()
        self.asynchronous.clear()
        self.processes.terminate()
        self.daemon.removepid()
        # checked by run(): exit once self._peers empties out
        self._stopping = True

    def load(self):
        """Reload the configuration and send to the peer the route which changed

        Returns True when the configuration was reloaded and the peer set was
        reconciled (removed / added / re-established / reconfigured), False
        when nothing changed or the configuration failed to parse.
        """
        self.logger.notice('performing reload of exabgp %s' % version,
                           'configuration')

        reloaded = self.configuration.reload()

        if not reloaded:
            #
            # Careful: the string below is used by the QA code to check for success or failure
            self.logger.error(
                'not reloaded, no change found in the configuration',
                'configuration')
            # Careful: the string above is used by the QA code to check for success or failure
            #
            self.logger.error(str(self.configuration.error), 'configuration')
            return False

        # drop peers that disappeared from the configuration
        for key, peer in self._peers.items():
            if key not in self.configuration.neighbors:
                self.logger.debug('removing peer: %s' % peer.neighbor.name(),
                                  'reactor')
                peer.remove()

        for key, neighbor in self.configuration.neighbors.items():
            # new peer
            if key not in self._peers:
                self.logger.debug('new peer: %s' % neighbor.name(), 'reactor')
                peer = Peer(neighbor, self)
                self._peers[key] = peer
            # modified peer
            elif self._peers[key].neighbor != neighbor:
                self.logger.debug(
                    'peer definition change, establishing a new connection for %s'
                    % str(key), 'reactor')
                self._peers[key].reestablish(neighbor)
            # same peer but perhaps not the routes
            else:
                # finding what route changed and sending the delta is not obvious
                self.logger.debug(
                    'peer definition identical, updating peer routes if required for %s'
                    % str(key), 'reactor')
                self._peers[key].reconfigure(neighbor)
            # re-apply MD5/binding on every listening address sharing the neighbor's family
            for ip in self._ips:
                if ip.afi == neighbor.peer_address.afi:
                    self.listener.listen_on(ip, neighbor.peer_address,
                                            self._port, neighbor.md5_password,
                                            neighbor.md5_base64, None)
        self.logger.notice('loaded new configuration successfully', 'reactor')

        return True

    def restart(self):
        """Kill the BGP session and restart it

        Reloads the configuration, removes peers that are no longer
        configured, re-establishes the sessions of the remaining peers, and
        restarts the helper processes.
        """
        self.logger.notice('performing restart of exabgp %s' % version,
                           'reactor')

        # XXX: FIXME: Could return False, in case there is interference with old config...
        reloaded = self.configuration.reload()

        for key in self._peers.keys():
            if key not in self.configuration.neighbors.keys():
                # fixed: this read the undefined name `peers`, raising a
                # NameError whenever a peer had to be removed on restart
                peer = self._peers[key]
                self.logger.debug('removing peer %s' % peer.neighbor.name(),
                                  'reactor')
                peer.remove()
            else:
                self._peers[key].reestablish()
        self.processes.start(self.configuration.processes, True)
Ejemplo n.º 33
0
	def __init__ (self):
		"""Prepare empty process and neighbor registries plus a logger."""
		self.logger = Logger()
		self.neighbors = {}
		self.processes = {}
Ejemplo n.º 34
0
class API(Command):
    """Text API front-end: parses commands received from helper processes.

    Dispatch tables (``functions`` / ``callback``) are presumably inherited
    from ``Command`` — they are not defined here; verify against the parent.
    """

    def __init__(self, reactor):
        self.reactor = reactor
        self.logger = Logger()
        # scratch Configuration used to parse partial, API-supplied sections
        self.configuration = Configuration([])

    def log_message(self, message, level='INFO'):
        """Log an informational API message."""
        self.logger.notice(message, 'api', level)

    def log_failure(self, message, level='ERR'):
        """Log an API failure, appending the tokeniser error when set."""
        error = str(self.configuration.tokeniser.error)
        report = '%s\nreason: %s' % (message, error) if error else message
        self.logger.error(report, 'api', level)

    def text(self, reactor, service, command):
        """Dispatch a text API *command* to its registered callback.

        Returns the callback's result, or False when no registered function
        matches (an error answer is then sent back to the process).
        """
        for registered in self.functions:
            if registered == command \
             or command.endswith(' ' + registered) \
             or registered + ' ' in command:
                return self.callback['text'][registered](self, reactor,
                                                         service, command)
        reactor.processes.answer_error(service)
        self.logger.warning(
            'command from process not understood : %s' % command, 'api')
        return False

    def api_route(self, command):
        """Parse an announce/withdraw route command; return the route changes.

        Returns an empty list when the line does not parse or leaves an open
        configuration scope.
        """
        action, line = command.split(' ', 1)

        self.configuration.static.clear()
        if not self.configuration.partial('static', line, action):
            return []

        if self.configuration.scope.location():
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop_routes()
        return changes

    def api_flow(self, command):
        """Parse an announce/withdraw flow command; return the flow changes."""
        action, flow, line = command.split(' ', 2)

        self.configuration.flow.clear()
        if not self.configuration.partial('flow', line):
            return []

        if self.configuration.scope.location():
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop_routes()
        return changes

    def api_vpls(self, command):
        """Parse an announce/withdraw vpls command; return the l2vpn changes."""
        action, line = command.split(' ', 1)

        self.configuration.l2vpn.clear()
        if not self.configuration.partial('l2vpn', line):
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop('l2vpn')
        return changes

    def api_attributes(self, command, peers):
        """Parse an attributes command; return the resulting route changes."""
        action, line = command.split(' ', 1)

        self.configuration.static.clear()
        if not self.configuration.partial('static', line):
            return []

        self.configuration.scope.to_context()
        changes = self.configuration.scope.pop_routes()
        return changes

    def api_refresh(self, command):
        """Parse a route-refresh command into [RouteRefresh], or False on error."""
        tokens = formated(command).split(' ')[2:]
        if len(tokens) != 2:
            return False
        afi = AFI.value(tokens.pop(0))
        safi = SAFI.value(tokens.pop(0))
        if afi is None or safi is None:
            return False
        return [RouteRefresh(afi, safi)]

    def api_eor(self, command):
        """Parse an announce eor command into a Family, or False on error.

        With no AFI/SAFI tokens, defaults to Family(1, 1) (ipv4 unicast).
        """
        tokens = formated(command).split(' ')[2:]
        number = len(tokens)

        if not number:
            return Family(1, 1)

        if number != 2:
            return False

        afi = AFI.fromString(tokens[0])
        if afi == AFI.undefined:
            return False

        safi = SAFI.fromString(tokens[1])
        if safi == SAFI.undefined:
            return False

        return Family(afi, safi)

    def api_operational(self, command):
        """Parse an announce operational command; return the operational message.

        Returns False when the command is malformed, otherwise whatever
        ``operational()`` builds (None or a class instance).
        """
        tokens = formated(command).split(' ')

        op = tokens[1].lower()
        what = tokens[2].lower()

        if op != 'operational':
            return False

        self.configuration.tokeniser.iterate.replenish(tokens[3:])
        # None or a class
        return operational(what, self.configuration.tokeniser.iterate)
Ejemplo n.º 35
0
 def __init__(self):
     """Set up the logger and the text output formatter."""
     self.logger = Logger()
     self.format = Text()
Ejemplo n.º 36
0
def check_update(neighbor, raw):
    """Decode the raw BGP message(s) in *raw* against *neighbor*'s capabilities.

    Builds a fake negotiated session (two Open messages derived from the
    neighbor) and feeds each UPDATE through Update.unpack_message, logging
    the result.  Returns False on any parse failure.

    NOTE(review): Python 2 only — `except X, e` syntax and `ord()` over str
    indexing; porting to Python 3 requires bytes handling changes.
    NOTE(review): no success return is visible here; the tail of the
    function may be truncated — confirm against the full source.
    """
    from exabgp.logger import Logger

    logger = Logger()
    logger._parser = True
    logger.parser('\ndecoding routes in configuration')

    # use the first (only) neighbor defined in the configuration
    n = neighbor[neighbor.keys()[0]]
    p = Peer(n, None)

    path = {}
    for f in known_families():
        if n.add_path:
            path[f] = n.add_path

    capa = Capabilities().new(n, False)
    capa[Capability.ID.ADD_PATH] = path
    capa[Capability.ID.MULTIPROTOCOL] = n.families()

    routerid_1 = str(n.router_id)
    # derive a distinct router-id for the fake remote side
    routerid_2 = '.'.join(
        str((int(_) + 1) % 250) for _ in str(n.router_id).split('.', -1))

    o1 = Open(4, n.local_as, routerid_1, capa, 180)
    o2 = Open(4, n.peer_as, routerid_2, capa, 180)
    negotiated = Negotiated(n)
    negotiated.sent(o1)
    negotiated.received(o2)
    #grouped = False

    while raw:
        # a full BGP header starts with 16 bytes of 0xFF marker
        if raw.startswith('\xff' * 16):
            kind = ord(raw[18])
            size = (ord(raw[16]) << 16) + (ord(raw[17]))

            injected, raw = raw[19:size], raw[size:]

            if kind == 2:
                logger.parser('the message is an update')
                decoding = 'update'
            else:
                logger.parser('the message is not an update (%d) - aborting' %
                              kind)
                return False
        else:
            logger.parser(
                'header missing, assuming this message is ONE update')
            decoding = 'update'
            injected, raw = raw, ''

        try:
            # This does not take the BGP header - let's assume we will not break that :)
            update = Update.unpack_message(injected, negotiated)
        except KeyboardInterrupt:
            raise
        except Notify, e:
            logger.parser('could not parse the message')
            logger.parser(str(e))
            return False
        except Exception, e:
            logger.parser('could not parse the message')
            logger.parser(str(e))
            return False
Ejemplo n.º 37
0
def UpdateFactory(negotiated, data):
    """Parse the body of a BGP UPDATE message into an Update object.

    *data* is the message payload (header already stripped); *negotiated*
    carries the session capabilities.  Raises Notify(3, 1) when the length
    fields are inconsistent with the data available.
    """
    logger = Logger()
    total = len(data)

    # leading withdrawn-routes section
    withdrawn_len, withdrawn, data = defix(data)
    if len(withdrawn) != withdrawn_len:
        raise Notify(
            3, 1, 'invalid withdrawn routes length, not enough data available')

    # path-attributes section; whatever remains is the announced NLRI
    attr_len, attribute, announced = defix(data)
    if len(attribute) != attr_len:
        raise Notify(
            3, 1,
            'invalid total path attribute length, not enough data available')

    # the two 2-byte length fields plus the three sections must cover the message
    if 2 + withdrawn_len + 2 + attr_len + len(announced) != total:
        raise Notify(
            3, 1,
            'error in BGP message length, not enough data for the size announced'
        )

    attributes = AttributesFactory(NLRIFactory, negotiated, attribute)

    # Is the peer going to send us some Path Information with the route (AddPath)
    addpath = negotiated.addpath.receive(AFI(AFI.ipv4), SAFI(SAFI.unicast))
    nexthop = attributes.get(AID.NEXT_HOP, None)
    nh = nexthop.packed if nexthop else None

    nlris = []

    if not withdrawn:
        logger.parser(LazyFormat("parsed no withdraw nlri", od, ''))
    while withdrawn:
        consumed, nlri = NLRIFactory(AFI.ipv4, SAFI.unicast_multicast,
                                     withdrawn, addpath, nh, IN.withdrawn)
        logger.parser(
            LazyFormat("parsed withdraw nlri %s payload " % nlri, od,
                       withdrawn[:len(nlri)]))
        withdrawn = withdrawn[consumed:]
        nlris.append(nlri)

    if not announced:
        logger.parser(LazyFormat("parsed no announced nlri", od, ''))
    while announced:
        consumed, nlri = NLRIFactory(AFI.ipv4, SAFI.unicast_multicast,
                                     announced, addpath, nh, IN.announced)
        logger.parser(
            LazyFormat("parsed announce nlri %s payload " % nlri, od,
                       announced[:len(nlri)]))
        announced = announced[consumed:]
        nlris.append(nlri)

    # multiprotocol NLRI carried inside the attributes
    nlris.extend(attributes.mp_withdraw)
    nlris.extend(attributes.mp_announce)

    return Update(nlris, attributes)
Ejemplo n.º 38
0
    def parse(self, data, negotiated):
        """Parse one path attribute from *data*, then recurse on the remainder.

        Truncated or malformed attributes are turned into TreatAsWithdraw /
        Discard markers (per the attribute's error-handling class) instead of
        always raising; duplicate non-duplicatable attributes raise Notify(3,1).
        Returns self once all of *data* is consumed.
        """
        if not data:
            return self

        try:
            # We do not care if the attribute are transitive or not as we do not redistribute
            flag = Attribute.Flag(ordinal(data[0]))
            aid = Attribute.CODE(ordinal(data[1]))
        except IndexError:
            # not even a complete flag/type header: treat the update as a withdraw
            self.add(TreatAsWithdraw())
            return self

        try:
            offset = 3
            length = ordinal(data[2])

            # extended length: the length field spans two bytes
            if flag & Attribute.Flag.EXTENDED_LENGTH:
                offset = 4
                length = (length << 8) + ordinal(data[3])
        except IndexError:
            self.add(TreatAsWithdraw(aid))
            return self

        data = data[offset:]
        left = data[length:]
        attribute = data[:length]

        logger = Logger()
        logger.parser(LazyAttribute(flag, aid, length, data[:length]))

        # remove the PARTIAL bit before comparaison if the attribute is optional
        if aid in Attribute.attributes_optional:
            flag &= Attribute.Flag.MASK_PARTIAL & 0xFF
            # flag &= ~Attribute.Flag.PARTIAL & 0xFF  # cleaner than above (python use signed integer for ~)

        if aid in self:
            if aid in self.NO_DUPLICATE:
                # NOTE(review): `attribute` is raw payload here — `attribute.ID`
                # looks wrong (no such attribute on bytes/str); confirm upstream.
                raise Notify(
                    3, 1, 'multiple attribute for %s' %
                    str(Attribute.CODE(attribute.ID)))

            logger.parser(
                'duplicate attribute %s (flag 0x%02X, aid 0x%02X) skipping' %
                (Attribute.CODE.names.get(aid, 'unset'), flag, aid))
            return self.parse(left, negotiated)

        # handle the attribute if we know it
        if Attribute.registered(aid, flag):
            # zero-length is only valid for a few attributes
            if length == 0 and aid not in self.VALID_ZERO:
                self.add(TreatAsWithdraw(aid))
                return self.parse(left, negotiated)

            try:
                decoded = Attribute.unpack(aid, flag, attribute, negotiated)
            except IndexError as exc:
                if aid in self.TREAT_AS_WITHDRAW:
                    decoded = TreatAsWithdraw(aid)
                else:
                    raise exc
            except Notify as exc:
                if aid in self.TREAT_AS_WITHDRAW:
                    decoded = TreatAsWithdraw()
                elif aid in self.DISCARD:
                    decoded = Discard()
                else:
                    raise exc
            self.add(decoded)
            return self.parse(left, negotiated)

        # XXX: FIXME: we could use a fallback function here like capability

        # if we know the attribute but the flag is not what the RFC says.
        if aid in Attribute.attributes_known:
            if aid in self.TREAT_AS_WITHDRAW:
                logger.parser(
                    'invalid flag for attribute %s (flag 0x%02X, aid 0x%02X) treat as withdraw'
                    % (Attribute.CODE.names.get(aid, 'unset'), flag, aid))
                self.add(TreatAsWithdraw())
            if aid in self.DISCARD:
                logger.parser(
                    'invalid flag for attribute %s (flag 0x%02X, aid 0x%02X) discard'
                    % (Attribute.CODE.names.get(aid, 'unset'), flag, aid))
                return self.parse(left, negotiated)
            # XXX: Check if we are missing any
            logger.parser(
                'invalid flag for attribute %s (flag 0x%02X, aid 0x%02X) unspecified (should not happen)'
                % (Attribute.CODE.names.get(aid, 'unset'), flag, aid))
            return self.parse(left, negotiated)

        # it is an unknown transitive attribute we need to pass on
        if flag & Attribute.Flag.TRANSITIVE:
            logger.parser(
                'unknown transitive attribute (flag 0x%02X, aid 0x%02X)' %
                (flag, aid))
            try:
                decoded = GenericAttribute(aid, flag | Attribute.Flag.PARTIAL,
                                           attribute)
            except IndexError:
                decoded = TreatAsWithdraw(aid)
            self.add(decoded, attribute)
            return self.parse(left, negotiated)

        # it is an unknown non-transitive attribute we can ignore.
        logger.parser(
            'ignoring unknown non-transitive attribute (flag 0x%02X, aid 0x%02X)'
            % (flag, aid))
        return self.parse(left, negotiated)
Ejemplo n.º 39
0
class Listener(object):
    """Accepts incoming BGP TCP connections and matches them to neighbors."""

    # socket address family -> exabgp AFI
    _family_AFI_map = {
        socket.AF_INET: AFI.ipv4,
        socket.AF_INET6: AFI.ipv6,
    }

    def __init__(self, reactor, backlog=200):
        self.serving = False
        self.logger = Logger()

        self._reactor = reactor
        self._backlog = backlog   # listen() backlog for each bound socket
        self._sockets = {}        # sock -> (local ip, port, peer ip, md5)
        self._accepted = {}       # listening sock -> accepted connection socket
        self._pending = 0         # accepted connections not yet reported by incoming()

    def _new_socket(self, ip):
        """Create a TCP socket of the right family for *ip*; raise NetworkError otherwise."""
        if ip.afi == AFI.ipv6:
            return socket.socket(socket.AF_INET6, socket.SOCK_STREAM,
                                 socket.IPPROTO_TCP)
        if ip.afi == AFI.ipv4:
            return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                                 socket.IPPROTO_TCP)
        raise NetworkError(
            'Can not create socket for listening, family of IP %s is unknown' %
            ip)

    def _listen(self, local_ip, peer_ip, local_port, md5, md5_base64, ttl_in):
        """Bind and listen on local_ip:local_port, or re-apply MD5/TTL if already bound.

        Raises BindingError for address-in-use / invalid-address, NetworkError
        for anything else socket-related.
        """
        self.serving = True

        # already listening on this ip/port: just (re)apply MD5 and TTL options
        for sock, (local, port, peer, md) in self._sockets.items():
            if local_ip.top() != local:
                continue
            if local_port != port:
                continue
            MD5(sock, peer_ip.top(), 0, md5, md5_base64)
            if ttl_in:
                MIN_TTL(sock, peer_ip, ttl_in)
            return

        try:
            sock = self._new_socket(local_ip)
            # MD5 must match the peer side of the TCP, not the local one
            MD5(sock, peer_ip.top(), 0, md5, md5_base64)
            if ttl_in:
                MIN_TTL(sock, peer_ip, ttl_in)
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                if local_ip.ipv6():
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
            except (socket.error, AttributeError):
                pass
            sock.setblocking(0)
            # s.settimeout(0.0)
            sock.bind((local_ip.top(), local_port))
            sock.listen(self._backlog)
            self._sockets[sock] = (local_ip.top(), local_port, peer_ip.top(),
                                   md5)
        except socket.error as exc:
            if exc.args[0] == errno.EADDRINUSE:
                raise BindingError(
                    'could not listen on %s:%d, the port may already be in use by another application'
                    % (local_ip, local_port))
            elif exc.args[0] == errno.EADDRNOTAVAIL:
                raise BindingError(
                    'could not listen on %s:%d, this is an invalid address' %
                    (local_ip, local_port))
            raise NetworkError(str(exc))
        except NetworkError as exc:
            self.logger.critical(str(exc), 'network')
            raise exc

    def listen_on(self, local_addr, remote_addr, port, md5_password,
                  md5_base64, ttl_in):
        """Start listening for BGP sessions on local_addr:port.

        Returns True on success; on failure logs actionable hints and
        returns False instead of raising.
        """
        try:
            # no remote given: accept from anywhere in the matching family
            if not remote_addr:
                remote_addr = IP.create(
                    '0.0.0.0') if local_addr.ipv4() else IP.create('::')
            self._listen(local_addr, remote_addr, port, md5_password,
                         md5_base64, ttl_in)
            self.logger.debug(
                'listening for BGP session(s) on %s:%d%s' %
                (local_addr, port, ' with MD5' if md5_password else ''),
                'network')
            return True
        except NetworkError as exc:
            if os.geteuid() != 0 and port <= 1024:
                self.logger.critical(
                    'can not bind to %s:%d, you may need to run ExaBGP as root'
                    % (local_addr, port), 'network')
            else:
                self.logger.critical(
                    'can not bind to %s:%d (%s)' %
                    (local_addr, port, str(exc)), 'network')
            self.logger.critical(
                'unset exabgp.tcp.bind if you do not want listen for incoming connections',
                'network')
            self.logger.critical(
                'and check that no other daemon is already binding to port %d'
                % port, 'network')
            return False

    def incoming(self):
        """Accept any waiting connections; return True while some are pending."""
        if not self.serving:
            return False

        for sock in self._sockets:
            if sock in self._accepted:
                continue
            try:
                io, _ = sock.accept()
                self._accepted[sock] = io
                self._pending += 1
            except socket.error as exc:
                # non-blocking accept: ignore would-block errors
                if exc.errno in error.block:
                    continue
                self.logger.critical(str(exc), 'network')
        if self._pending:
            self._pending -= 1
            return True
        return False

    def _connected(self):
        """Yield an Incoming wrapper for each accepted connection, draining the queue."""
        try:
            for sock, io in list(self._accepted.items()):
                del self._accepted[sock]
                if sock.family == socket.AF_INET:
                    local_ip = io.getpeername()[0]  # local_ip,local_port
                    remote_ip = io.getsockname()[0]  # remote_ip,remote_port
                elif sock.family == socket.AF_INET6:
                    local_ip = io.getpeername()[
                        0]  # local_ip,local_port,local_flow,local_scope
                    remote_ip = io.getsockname()[
                        0]  # remote_ip,remote_port,remote_flow,remote_scope
                else:
                    raise AcceptError('unexpected address family (%d)' %
                                      sock.family)
                fam = self._family_AFI_map[sock.family]
                yield Incoming(fam, remote_ip, local_ip, io)
        except NetworkError as exc:
            self.logger.critical(str(exc), 'network')

    def new_connections(self):
        """Generator (scheduled as a coroutine): route accepted connections to peers.

        Exact-match neighbors are tried first; "range" neighbors are collected
        and only used when no individual peer claimed the connection.  Refused
        or unmatched connections get a NOTIFICATION sent back.
        """
        if not self.serving:
            return
        yield None

        reactor = self._reactor
        ranged_neighbor = []

        for connection in self._connected():
            self.logger.debug('new connection received %s' % connection.name(),
                              'network')
            for key in reactor.peers:
                peer = reactor.peers[key]
                neighbor = peer.neighbor

                connection_local = IP.create(connection.local).address()
                neighbor_peer_start = neighbor.peer_address.address()
                neighbor_peer_next = neighbor_peer_start + neighbor.range_size

                if not neighbor_peer_start <= connection_local < neighbor_peer_next:
                    continue

                connection_peer = IP.create(connection.peer).address()
                neighbor_local = neighbor.local_address.address()

                if connection_peer != neighbor_local:
                    if not neighbor.auto_discovery:
                        continue

                # we found a range matching for this connection
                # but the peer may already have connected, so
                # we need to iterate all individual peers before
                # handling "range" peers
                if neighbor.range_size > 1:
                    ranged_neighbor.append(peer.neighbor)
                    continue

                denied = peer.handle_connection(connection)
                if denied:
                    self.logger.debug(
                        'refused connection from %s due to the state machine' %
                        connection.name(), 'network')
                    break
                self.logger.debug(
                    'accepted connection from %s' % connection.name(),
                    'network')
                break
            else:
                # we did not break (and nothing was found/done or we have group match)
                matched = len(ranged_neighbor)
                if matched > 1:
                    self.logger.debug(
                        'could not accept connection from %s (more than one neighbor match)'
                        % connection.name(), 'network')
                    reactor.asynchronous.schedule(
                        str(uuid.uuid1()), 'sending notification (6,5)',
                        connection.notification(
                            6, 5,
                            'could not accept the connection (more than one neighbor match)'
                        ))
                    return
                if not matched:
                    self.logger.debug(
                        'no session configured for %s' % connection.name(),
                        'network')
                    reactor.asynchronous.schedule(
                        str(uuid.uuid1()), 'sending notification (6,3)',
                        connection.notification(
                            6, 3, 'no session configured for the peer'))
                    return

                # materialise a one-off neighbor from the matched range definition
                new_neighbor = copy.copy(ranged_neighbor[0])
                new_neighbor.range_size = 1
                new_neighbor.generated = True
                new_neighbor.local_address = IP.create(connection.peer)
                new_neighbor.peer_address = IP.create(connection.local)
                new_neighbor.router_id = RouterID.create(connection.local)

                # NOTE(review): Peer is given this Listener as second argument,
                # elsewhere Peer(neighbor, reactor) is used — confirm Peer's signature
                new_peer = Peer(new_neighbor, self)
                denied = new_peer.handle_connection(connection)
                if denied:
                    self.logger.debug(
                        'refused connection from %s due to the state machine' %
                        connection.name(), 'network')
                    return

                reactor.peers[new_neighbor.name()] = new_peer
                return

    def stop(self):
        """Close every listening socket and stop serving."""
        if not self.serving:
            return

        for sock, (ip, port, _, _) in self._sockets.items():
            sock.close()
            self.logger.info('stopped listening on %s:%d' % (ip, port),
                             'network')

        self._sockets = {}
        self.serving = False
Ejemplo n.º 40
0
class API(object):
    """Dispatch text commands received from API helper processes.

    Commands are matched by substring against the known command names in
    ``functions`` and routed to the matching callback registered in
    ``Command.Text.callback``.
    """

    # shared registries of command-name -> callback, per encoding
    callback = {
        'text': {},
        'json': {},
    }

    # need to sort and reverse, in order for the shorter command to not used by error
    # "show neighbor" should not match "show neighbors"
    functions = sorted([
        'show neighbor',
        'show neighbors',
        'show routes',
        'show routes extensive',
        'announce operational',
        'announce attributes',
        'announce eor',
        'announce flow',
        'announce route',
        'announce route-refresh',
        'announce vpls',
        'announce watchdog',
        'withdraw attributes',
        'withdraw flow',
        'withdraw route',
        'withdraw vpls',
        'withdraw watchdog',
        'flush route',
        'teardown',
        'version',
        'restart',
        'reload',
        'shutdown',
    ],
                       reverse=True)

    def __init__(self, reactor):
        self.reactor = reactor
        self.logger = Logger()
        self.parser = Parser.Text(reactor)

        # register every supported command; failing fast at startup is
        # better than a KeyError when the command is first used
        for name in self.functions:
            try:
                self.callback['text'][name] = Command.Text.callback[name]
            except KeyError:
                raise RuntimeError(
                    'The code does not have an implementation for "%s", please code it !'
                    % name)

    def log_message(self, message, level='info'):
        """Log a plain message on behalf of the reactor."""
        self.logger.reactor(message, level)

    def log_failure(self, message, level='error'):
        """Log a failure, appending the parser's last error when present."""
        error = str(self.parser.configuration.tokeniser.error)
        report = '%s\nreason: %s' % (message, error) if error else message
        self.logger.reactor(report, level)

    def text(self, reactor, service, command):
        """Route a text command to its callback.

        Returns True when a callback handled the command, False when the
        command was not recognised.
        """
        for registered in self.functions:
            if registered in command:
                # __name__ is portable (func_name is Python-2-only)
                self.logger.reactor(
                    "callback | handling '%s' with %s" %
                    (command, self.callback['text'][registered].__name__),
                    'warning')
                # XXX: should we not test the return value ?
                self.callback['text'][registered](self, reactor, service,
                                                  command)
                # reactor.plan(self.callback['text'][registered](self,reactor,service,command),registered)
                return True
        self.logger.reactor(
            "Command from process not understood : %s" % command, 'warning')
        return False

    def shutdown(self):
        """Ask the reactor to shut down."""
        self.reactor.api_shutdown()
        return True

    def reload(self):
        """Ask the reactor to reload its configuration."""
        self.reactor.api_reload()
        return True

    def restart(self):
        """Ask the reactor to restart all BGP sessions."""
        self.reactor.api_restart()
        return True
Ejemplo n.º 41
0
from exabgp.bgp.message import Update
from exabgp.bgp.message import Open
from exabgp.bgp.message.open import Version
from exabgp.bgp.message.open import ASN
from exabgp.bgp.message.open import RouterID
from exabgp.bgp.message.open import HoldTime
from exabgp.bgp.message.open.capability import Capabilities
from exabgp.bgp.message.open.capability import Capability
from exabgp.bgp.message.open.capability import Negotiated
from exabgp.bgp.message.update.nlri import NLRI

from exabgp.configuration.setup import environment
from exabgp.logger import Logger

# Initialise the exabgp environment with defaults and create the shared
# logger before any message decoding happens below.
environment.setup('')
logger = Logger()

# collected raw UPDATE message bodies used by the checks below
bodies = []

body = [
    0x0,
    0x0,  # len withdrawn routes
    # No routes to remove
    # Attributes
    0x0,
    0x30,  # len attributes (48)
    0x40,  # Flag Transitive
    0x1,  # Code : Attribute ID Origin
    0x1,  # len
    0x0,  # Origin : IGP
    0x50,  # Flag Transitive + extended length
Ejemplo n.º 42
0
class Reactor (object):
	# [hex(ord(c)) for c in os.popen('clear').read()]
	clear = concat_bytes_i(character(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a'])

	def __init__ (self, configurations):
		self._ips = environment.settings().tcp.bind
		self._port = environment.settings().tcp.port
		self._stopping = environment.settings().tcp.once

		self.max_loop_time = environment.settings().reactor.speed
		self.early_drop = environment.settings().daemon.drop

		self.processes = None

		self.configuration = Configuration(configurations)
		self.logger = Logger()
		self.async = ASYNC()
		self.signal = Signal()
		self.daemon = Daemon(self)
		self.listener = Listener(self)
		self.api = API(self)

		self.peers = {}

		self._reload_processes = False
		self._saved_pid = False

	def _termination (self,reason):
		self.signal.received = Signal.SHUTDOWN
		self.logger.critical(reason,'reactor')

	def _api_ready (self,sockets,peers):
		sleeptime = 0 if peers or self.async.ready() else self.max_loop_time / 100
		fds = self.processes.fds()
		ios = fds + sockets
		try:
			read,_,_ = select.select(ios,[],[],sleeptime)
			for fd in fds:
				if fd in read:
					read.remove(fd)
			return read
		except select.error as exc:
			err_no,message = exc.args  # pylint: disable=W0633
			if err_no not in error.block:
				raise exc
			return []
		except socket.error as exc:
			# python 3 does not raise on closed FD, but python2 does
			# we have lost a peer and it is causing the select
			# to complain, the code will self-heal, ignore the issue
			# (EBADF from python2 must be ignored if when checkign error.fatal)
			# otherwise sending  notification causes TCP to drop and cause
			# this code to kill ExaBGP
			return []
		except ValueError as exc:
			# The peer closing the TCP connection lead to a negative file descritor
			return []
		except KeyboardInterrupt:
			self._termination('^C received')
			return []

	def _active_peers (self):
		peers = set()
		for key,peer in self.peers.items():
			if not peer.neighbor.passive or peer.proto:
				peers.add(key)
		return peers

	def _completed (self,peers):
		for peer in peers:
			if self.peers[peer].neighbor.rib.outgoing.pending():
				return False
		return True

	def run (self, validate, root):
		self.daemon.daemonise()

		# Make sure we create processes once we have closed file descriptor
		# unfortunately, this must be done before reading the configuration file
		# so we can not do it with dropped privileges
		self.processes = Processes()

		# we have to read the configuration possibly with root privileges
		# as we need the MD5 information when we bind, and root is needed
		# to bind to a port < 1024

		# this is undesirable as :
		# - handling user generated data as root should be avoided
		# - we may not be able to reload the configuration once the privileges are dropped

		# but I can not see any way to avoid it
		for ip in self._ips:
			if not self.listener.listen_on(ip, None, self._port, None, False, None):
				return False

		if not self.load():
			return False

		if validate:  # only validate configuration
			self.logger.warning('','configuration')
			self.logger.warning('parsed Neighbors, un-templated','configuration')
			self.logger.warning('------------------------------','configuration')
			self.logger.warning('','configuration')
			for key in self.peers:
				self.logger.warning(str(self.peers[key].neighbor),'configuration')
				self.logger.warning('','configuration')
			return True

		for neighbor in self.configuration.neighbors.values():
			if neighbor.listen:
				if not self.listener.listen_on(neighbor.md5_ip, neighbor.peer_address, neighbor.listen, neighbor.md5_password, neighbor.md5_base64, neighbor.ttl_in):
					return False

		if not self.early_drop:
			self.processes.start(self.configuration.processes)

		if not self.daemon.drop_privileges():
			self.logger.critical('could not drop privileges to \'%s\' refusing to run as root' % self.daemon.user,'reactor')
			self.logger.critical('set the environmemnt value exabgp.daemon.user to change the unprivileged user','reactor')
			return

		if self.early_drop:
			self.processes.start(self.configuration.processes)

		# This is required to make sure we can write in the log location as we now have dropped root privileges
		if not self.logger.restart():
			self.logger.critical('could not setup the logger, aborting','reactor')
			return

		if not self.daemon.savepid():
			return

		# did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
		reload_completed = False

		wait = environment.settings().tcp.delay
		if wait:
			sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
			self.logger.debug('waiting for %d seconds before connecting' % sleeptime,'reactor')
			time.sleep(float(sleeptime))

		workers = {}
		peers = set()

		while True:
			try:
				if self.signal.received:
					for key in self.peers:
						if self.peers[key].neighbor.api['signal']:
							self.peers[key].reactor.processes.signal(self.peers[key].neighbor,self.signal.number)

					signaled = self.signal.received
					self.signal.rearm()

					if signaled == Signal.SHUTDOWN:
						self.shutdown()
						break

					if signaled == Signal.RESTART:
						self.restart()
						continue

					if not reload_completed:
						continue

					if signaled == Signal.FULL_RELOAD:
						self._reload_processes = True

					if signaled in (Signal.RELOAD, Signal.FULL_RELOAD):
						self.load()
						self.processes.start(self.configuration.processes,self._reload_processes)
						self._reload_processes = False
						continue

				if self.listener.incoming():
					# check all incoming connection
					self.async.schedule(str(uuid.uuid1()),'check new connection',self.listener.new_connections())

				peers = self._active_peers()
				if self._completed(peers):
					reload_completed = True

				# give a turn to all the peers
				for key in list(peers):
					peer = self.peers[key]
					action = peer.run()

					# .run() returns an ACTION enum:
					# * immediate if it wants to be called again
					# * later if it should be called again but has no work atm
					# * close if it is finished and is closing down, or restarting
					if action == ACTION.CLOSE:
						if key in self.peers:
							del self.peers[key]
						peers.discard(key)
					# we are loosing this peer, not point to schedule more process work
					elif action == ACTION.LATER:
						for io in peer.sockets():
							workers[io] = key
						# no need to come back to it before a a full cycle
						peers.discard(key)

					if not peers:
						break

				# read at least on message per process if there is some and parse it
				for service,command in self.processes.received():
					self.api.text(self,service,command)

				self.async.run()

				for io in self._api_ready(list(workers),peers):
					peers.add(workers[io])
					del workers[io]

				if self._stopping and not self.peers.keys():
					self._termination('exiting on peer termination')

			except KeyboardInterrupt:
				self._termination('^C received')
			# socket.error is a subclass of IOError (so catch it first)
			except socket.error:
				self._termination('socket error received')
			except IOError:
				self._termination('I/O Error received, most likely ^C during IO')
			except SystemExit:
				self._termination('exiting')
			except ProcessError:
				self._termination('Problem when sending message(s) to helper program, stopping')
			except select.error:
				self._termination('problem using select, stopping')

	def shutdown (self):
		"""Terminate all the current BGP connections"""
		self.logger.critical('performing shutdown','reactor')
		if self.listener:
			self.listener.stop()
			self.listener = None
		for key in self.peers.keys():
			self.peers[key].stop()
		self.async.clear()
		self.processes.terminate()
		self.daemon.removepid()
		self._stopping = True

	def load (self):
		"""Reload the configuration and send to the peer the route which changed"""
		self.logger.notice('performing reload of exabgp %s' % version,'configuration')

		reloaded = self.configuration.reload()

		if not reloaded:
			#
			# Careful the string below is used but the QA code to check for sucess of failure
			self.logger.error('problem with the configuration file, no change done','configuration')
			# Careful the string above is used but the QA code to check for sucess of failure
			#
			self.logger.error(str(self.configuration.error),'configuration')
			return False

		for key, peer in self.peers.items():
			if key not in self.configuration.neighbors:
				self.logger.debug('removing peer: %s' % peer.neighbor.name(),'reactor')
				peer.stop()

		for key, neighbor in self.configuration.neighbors.items():
			# new peer
			if key not in self.peers:
				self.logger.debug('new peer: %s' % neighbor.name(),'reactor')
				peer = Peer(neighbor,self)
				self.peers[key] = peer
			# modified peer
			elif self.peers[key].neighbor != neighbor:
				self.logger.debug('peer definition change, establishing a new connection for %s' % str(key),'reactor')
				self.peers[key].reestablish(neighbor)
			# same peer but perhaps not the routes
			else:
				# finding what route changed and sending the delta is not obvious
				self.logger.debug('peer definition identical, updating peer routes if required for %s' % str(key),'reactor')
				self.peers[key].reconfigure(neighbor)
			for ip in self._ips:
				if ip.afi == neighbor.peer_address.afi:
					self.listener.listen_on(ip, neighbor.peer_address, self._port, neighbor.md5_password, neighbor.md5_base64, None)
		self.logger.notice('loaded new configuration successfully','reactor')

		return True

	def restart (self):
		"""Kill the BGP session and restart it"""
		self.logger.notice('performing restart of exabgp %s' % version,'reactor')
		self.configuration.reload()

		for key in self.peers.keys():
			if key not in self.configuration.neighbors.keys():
				neighbor = self.configuration.neighbors[key]
				self.logger.debug('removing Peer %s' % neighbor.name(),'reactor')
				self.peers[key].stop()
			else:
				self.peers[key].reestablish()
		self.processes.start(self.configuration.processes,True)
Ejemplo n.º 43
0
    def __init__(self, me, holdtime):
        """Track keepalive scheduling for one session.

        me: identification of the local side, kept for logging context.
        holdtime: negotiated HoldTime; its keepalive() gives the interval.
        """
        self.me = me
        self.logger = Logger()

        # interval between keepalives, derived from the negotiated holdtime
        self.keepalive = holdtime.keepalive()
        # unix timestamp (seconds) of the last keepalive we sent
        self.last_sent = int(time.time())
Ejemplo n.º 44
0
class Signal(object):
    """Install POSIX signal handlers and record the requested action.

    Handlers only record what should happen (shutdown, restart, reload);
    the reactor's main loop reads ``received``/``number`` and acts on them.
    A new signal is ignored while a previous one is still being handled.
    """

    NONE = 0
    SHUTDOWN = 1
    RESTART = 2
    RELOAD = 4
    FULL_RELOAD = 8

    def __init__(self):
        self.logger = Logger()
        self.received = self.NONE
        self.number = 0
        self.rearm()

    def rearm(self):
        """Reset the recorded action and (re)install all the handlers."""
        self.received = Signal.NONE
        self.number = 0

        signal.signal(signal.SIGTERM, self.sigterm)
        signal.signal(signal.SIGHUP, self.sighup)
        signal.signal(signal.SIGALRM, self.sigalrm)
        signal.signal(signal.SIGUSR1, self.sigusr1)
        signal.signal(signal.SIGUSR2, self.sigusr2)

    def _schedule(self, signum, event, action):
        # record the action unless a previous signal is still pending
        if self.received:
            self.logger.reactor('ignoring - still handling previous signal')
            return
        self.logger.reactor(action)
        self.received = event
        self.number = signum

    def sigterm(self, signum, frame):
        self.logger.reactor('SIG TERM received')
        self._schedule(signum, self.SHUTDOWN, 'scheduling shutdown')

    def sighup(self, signum, frame):
        # SIGHUP deliberately triggers a shutdown here, not a reload
        self.logger.reactor('SIG HUP received')
        self._schedule(signum, self.SHUTDOWN, 'scheduling shutdown')

    def sigalrm(self, signum, frame):
        self.logger.reactor('SIG ALRM received')
        self._schedule(signum, self.RESTART, 'scheduling restart')

    def sigusr1(self, signum, frame):
        self.logger.reactor('SIG USR1 received')
        self._schedule(signum, self.RELOAD,
                       'scheduling reload of configuration')

    def sigusr2(self, signum, frame):
        # BUG FIX: the original logged 'SIG USR1 received' here
        self.logger.reactor('SIG USR2 received')
        self._schedule(signum, self.FULL_RELOAD,
                       'scheduling reload of configuration and processes')
Ejemplo n.º 45
0
    def _factory(self, data):
        """Recursively parse the BGP path attributes contained in ``data``.

        Decodes one attribute per call and recurses on the rest via
        self.factory().  Recognised attributes are stored on self (with
        caching via add_from_cache); MP_REACH/MP_UNREACH NLRIs are appended
        to self.mp_announce / self.mp_withdraw; unknown transitive
        attributes are kept as UnknownAttribute, unknown non-transitive
        ones are ignored.  Raises Notify on malformed input.

        NOTE(review): bytes are accessed with ord(data[i]) - this assumes
        Python 2 ``str`` payloads.
        """
        if not data:
            return self

        # We do not care if the attribute are transitive or not as we do not redistribute
        flag = Flag(ord(data[0]))
        code = AID(ord(data[1]))

        # attribute length is 2 bytes when the extended-length flag is set
        if flag & Flag.EXTENDED_LENGTH:
            length = unpack('!H', data[2:4])[0]
            offset = 4
        else:
            length = ord(data[2])
            offset = 3

        # once an MP attribute was seen, only MP attributes keep the
        # update cacheable; otherwise accumulate raw bytes for caching
        if self.hasmp:
            if code not in (AID.MP_REACH_NLRI, AID.MP_UNREACH_NLRI):
                self.cacheable = False
                self.prefix = ''
        else:
            self.prefix += data[:offset + length]

        data = data[offset:]
        next = data[length:]
        attribute = data[:length]

        logger = Logger()
        logger.parser(
            LazyFormat(
                "parsing flag %x type %02x (%s) len %02x %s" %
                (flag, int(code), code, length, 'payload ' if length else ''),
                od, data[:length]))

        if code == AID.ORIGIN and flag.matches(Origin.FLAG):
            # This if block should never be called anymore ...
            if not self.add_from_cache(code, attribute):
                self.add(Origin(ord(attribute)), attribute)
            return self.factory(next)

        # only 2-4% of duplicated data - is it worth to cache ?
        if code == AID.AS_PATH and flag.matches(ASPath.FLAG):
            if length:
                # we store the AS4_PATH as AS_PATH, do not over-write
                if not self.has(code):
                    if not self.add_from_cache(code, attribute):
                        self.add(self.__new_ASPath(attribute), attribute)
            return self.factory(next)

        if code == AID.AS4_PATH and flag.matches(AS4Path.FLAG):
            if length:
                # ignore the AS4_PATH on new spekers as required by RFC 4893 section 4.1
                if not self.negotiated.asn4:
                    # This replace the old AS_PATH
                    if not self.add_from_cache(code, attribute):
                        self.add(self.__new_ASPath4(attribute), attribute)
            return self.factory(next)

        if code == AID.NEXT_HOP and flag.matches(NextHop.FLAG):
            # XXX: FIXME: we are double caching the NH (once in the class, once here)
            if not self.add_from_cache(code, attribute):
                self.add(cachedNextHop(attribute), attribute)
            return self.factory(next)

        if code == AID.MED and flag.matches(MED.FLAG):
            if not self.add_from_cache(code, attribute):
                self.add(MED(attribute), attribute)
            return self.factory(next)

        if code == AID.LOCAL_PREF and flag.matches(LocalPreference.FLAG):
            if not self.add_from_cache(code, attribute):
                self.add(LocalPreference(attribute), attribute)
            return self.factory(next)

        if code == AID.ATOMIC_AGGREGATE and flag.matches(AtomicAggregate.FLAG):
            # ATOMIC_AGGREGATE carries no payload; a cache miss here means
            # the attribute bytes are malformed
            if not self.add_from_cache(code, attribute):
                raise Notify(
                    3, 2, 'invalid ATOMIC_AGGREGATE %s' %
                    [hex(ord(_)) for _ in attribute])
            return self.factory(next)

        if code == AID.AGGREGATOR and flag.matches(Aggregator.FLAG):
            # AS4_AGGREGATOR are stored as AGGREGATOR - so do not overwrite if exists
            if not self.has(code):
                if not self.add_from_cache(AID.AGGREGATOR, attribute):
                    self.add(Aggregator(attribute), attribute)
            return self.factory(next)

        if code == AID.AS4_AGGREGATOR and flag.matches(Aggregator.FLAG):
            if not self.add_from_cache(AID.AGGREGATOR, attribute):
                self.add(Aggregator(attribute), attribute)
            return self.factory(next)

        if code == AID.COMMUNITY and flag.matches(Communities.FLAG):
            if not self.add_from_cache(code, attribute):
                self.add(self.__new_communities(attribute), attribute)
            return self.factory(next)

        if code == AID.ORIGINATOR_ID and flag.matches(OriginatorID.FLAG):
            if not self.add_from_cache(code, attribute):
                self.add(OriginatorID(AFI.ipv4, SAFI.unicast, data[:4]),
                         attribute)
            return self.factory(next)

        if code == AID.CLUSTER_LIST and flag.matches(ClusterList.FLAG):
            if not self.add_from_cache(code, attribute):
                self.add(ClusterList(attribute), attribute)
            return self.factory(next)

        if code == AID.EXTENDED_COMMUNITY and flag.matches(ECommunities.FLAG):
            if not self.add_from_cache(code, attribute):
                self.add(self.__new_extended_communities(attribute), attribute)
            return self.factory(next)

        if code == AID.AIGP and flag.matches(AIGP.FLAG):
            # only keep AIGP when the neighbor was configured to accept it
            if self.negotiated.neighbor.aigp:
                if not self.add_from_cache(code, attribute):
                    self.add(AIGP(attribute), attribute)
            return self.factory(next)

        if code == AID.MP_UNREACH_NLRI and flag.matches(MPURNLRI.FLAG):
            self.hasmp = True

            # -- Reading AFI/SAFI
            data = data[:length]
            afi, safi = unpack('!HB', data[:3])
            offset = 3
            data = data[offset:]

            if (afi, safi) not in self.negotiated.families:
                raise Notify(
                    3, 0,
                    'presented a non-negotiated family %d/%d' % (afi, safi))

            # Is the peer going to send us some Path Information with the route (AddPath)
            addpath = self.negotiated.addpath.receive(afi, safi)

            # XXX: we do assume that it is an EOR. most likely harmless
            if not data:
                self.mp_withdraw.append(NLRIEOR(afi, safi, IN.announced))
                return self.factory(next)

            while data:
                length, nlri = self.nlriFactory(afi, safi, data, addpath, None,
                                                IN.withdrawn)
                self.mp_withdraw.append(nlri)
                data = data[length:]
                logger.parser(
                    LazyFormat("parsed withdraw mp nlri %s payload " % nlri,
                               od, data[:length]))

            return self.factory(next)

        if code == AID.MP_REACH_NLRI and flag.matches(MPRNLRI.FLAG):
            self.hasmp = True

            data = data[:length]
            # -- Reading AFI/SAFI
            afi, safi = unpack('!HB', data[:3])
            offset = 3

            # we do not want to accept unknown families
            if (afi, safi) not in self.negotiated.families:
                raise Notify(
                    3, 0,
                    'presented a non-negotiated family %d/%d' % (afi, safi))

            # -- Reading length of next-hop
            len_nh = ord(data[offset])
            offset += 1

            # rd is the number of leading route-distinguisher bytes inside
            # the next-hop field (8 for the VPN SAFIs, 0 otherwise)
            rd = 0

            # check next-hope size
            if afi == AFI.ipv4:
                if safi in (SAFI.unicast, SAFI.multicast):
                    if len_nh != 4:
                        raise Notify(
                            3, 0,
                            'invalid ipv4 unicast/multicast next-hop length %d expected 4'
                            % len_nh)
                elif safi in (SAFI.mpls_vpn, ):
                    if len_nh != 12:
                        raise Notify(
                            3, 0,
                            'invalid ipv4 mpls_vpn next-hop length %d expected 12'
                            % len_nh)
                    rd = 8
                elif safi in (SAFI.flow_ip, ):
                    if len_nh not in (0, 4):
                        raise Notify(
                            3, 0,
                            'invalid ipv4 flow_ip next-hop length %d expected 4'
                            % len_nh)
                elif safi in (SAFI.flow_vpn, ):
                    if len_nh not in (0, 4):
                        raise Notify(
                            3, 0,
                            'invalid ipv4 flow_vpn next-hop length %d expected 4'
                            % len_nh)
            elif afi == AFI.ipv6:
                if safi in (SAFI.unicast, ):
                    if len_nh not in (16, 32):
                        raise Notify(
                            3, 0,
                            'invalid ipv6 unicast next-hop length %d expected 16 or 32'
                            % len_nh)
                elif safi in (SAFI.mpls_vpn, ):
                    if len_nh not in (24, 40):
                        raise Notify(
                            3, 0,
                            'invalid ipv6 mpls_vpn next-hop length %d expected 24 or 40'
                            % len_nh)
                    rd = 8
                elif safi in (SAFI.flow_ip, ):
                    if len_nh not in (0, 16, 32):
                        raise Notify(
                            3, 0,
                            'invalid ipv6 flow_ip next-hop length %d expected 0, 16 or 32'
                            % len_nh)
                elif safi in (SAFI.flow_vpn, ):
                    if len_nh not in (0, 16, 32):
                        raise Notify(
                            3, 0,
                            'invalid ipv6 flow_vpn next-hop length %d expected 0, 16 or 32'
                            % len_nh)
            size = len_nh - rd

            # XXX: FIXME: GET IT FROM CACHE HERE ?
            nh = data[offset + rd:offset + rd + size]

            # chech the RD is well zero
            # NOTE(review): data[offset:8] only spans bytes offset..7, i.e.
            # part of the 8-byte RD when offset is 4 - confirm intended
            # (data[offset:offset + rd] would cover the whole RD)
            if rd and sum([int(ord(_)) for _ in data[offset:8]]) != 0:
                raise Notify(
                    3, 0,
                    "MP_REACH_NLRI next-hop's route-distinguisher must be zero"
                )

            offset += len_nh

            # Skip a reserved bit as somone had to bug us !
            reserved = ord(data[offset])
            offset += 1

            if reserved != 0:
                raise Notify(3, 0,
                             'the reserved bit of MP_REACH_NLRI is not zero')

            # Is the peer going to send us some Path Information with the route (AddPath)
            addpath = self.negotiated.addpath.receive(afi, safi)

            # Reading the NLRIs
            data = data[offset:]

            while data:
                length, nlri = self.nlriFactory(afi, safi, data, addpath, nh,
                                                IN.announced)
                self.mp_announce.append(nlri)
                logger.parser(
                    LazyFormat("parsed announce mp nlri %s payload " % nlri,
                               od, data[:length]))
                data = data[length:]
            return self.factory(next)

        if flag & Flag.TRANSITIVE:
            if code in self.known_attributes:
                # XXX: FIXME: we should really close the session
                logger.parser(
                    'ignoring implemented invalid transitive attribute (code 0x%02X, flag 0x%02X)'
                    % (code, flag))
                return self.factory(next)

            if not self.add_from_cache(code, attribute):
                self.add(UnknownAttribute(code, flag, attribute), attribute)
            return self.factory(next)

        logger.parser(
            'ignoring non-transitive attribute (code 0x%02X, flag 0x%02X)' %
            (code, flag))
        return self.factory(next)
Ejemplo n.º 46
0
def check_neighbor(neighbors):
    logger = Logger()
    logger._option.parser = True

    if not neighbors:
        logger.parser('\ncould not find neighbor(s) to check')
        return False

    logger.parser('\ndecoding routes in configuration')

    for name in neighbors.keys():
        neighbor = neighbors[name]

        path = {}
        for f in NLRI.known_families():
            if neighbor.add_path:
                path[f] = neighbor.add_path

        capa = Capabilities().new(neighbor, False)
        if path:
            capa[Capability.CODE.ADD_PATH] = path
        capa[Capability.CODE.MULTIPROTOCOL] = neighbor.families()

        o1 = Open(Version(4), ASN(neighbor.local_as), HoldTime(180),
                  RouterID(neighbor.local_address.top()), capa)
        o2 = Open(Version(4), ASN(neighbor.peer_as), HoldTime(180),
                  RouterID(neighbor.peer_address.top()), capa)
        negotiated = Negotiated(neighbor)
        negotiated.sent(o1)
        negotiated.received(o2)
        # grouped = False

        for _ in neighbor.rib.outgoing.updates(False):
            pass

        for change1 in neighbor.rib.outgoing.sent_changes():
            str1 = change1.extensive()
            packed = list(
                Update([change1.nlri],
                       change1.attributes).messages(negotiated))
            pack1 = packed[0]

            logger.parser('parsed route requires %d updates' % len(packed))
            logger.parser('update size is %d' % len(pack1))

            logger.parser('parsed route %s' % str1)
            logger.parser('parsed hex   %s' % od(pack1))

            # This does not take the BGP header - let's assume we will not break that :)
            try:
                logger.parser('')  # new line

                pack1s = pack1[19:] if pack1.startswith('\xFF' * 16) else pack1
                update = Update.unpack_message(pack1s, negotiated)

                change2 = Change(update.nlris[0], update.attributes)
                str2 = change2.extensive()
                pack2 = list(
                    Update([update.nlris[0]],
                           update.attributes).messages(negotiated))[0]

                logger.parser('recoded route %s' % str2)
                logger.parser('recoded hex   %s' % od(pack2))

                str1 = str1.replace('attribute [ 0x04 0x80 0x00000064 ]',
                                    'med 100')
                str1r = str1.lower().replace(' med 100', '').replace(
                    ' local-preference 100', '').replace(' origin igp', '')
                str2r = str2.lower().replace(' med 100', '').replace(
                    ' local-preference 100', '').replace(' origin igp', '')

                if 'next-hop self' in str1r:
                    if ':' in str1r:
                        str1r = str1r.replace('next-hop self', 'next-hop ::1')
                    else:
                        str1r = str1r.replace('next-hop self',
                                              'next-hop 127.0.0.1')

                if ' name ' in str1r:
                    parts = str1r.split(' ')
                    pos = parts.index('name')
                    str1r = ' '.join(parts[:pos] + parts[pos + 2:])

                skip = False

                if str1r != str2r:
                    if 'attribute [' in str1r and ' 0x00 ' in str1r:
                        # we do not decode non-transitive attributes
                        logger.parser(
                            'skipping string check on update with non-transitive attribute(s)'
                        )
                        skip = True
                    else:
                        logger.parser('strings are different:')
                        logger.parser('[%s]' % (str1r))
                        logger.parser('[%s]' % (str2r))
                        return False
                else:
                    logger.parser('strings are fine')

                if skip:
                    logger.parser(
                        'skipping encoding for update with non-transitive attribute(s)'
                    )
                elif pack1 != pack2:
                    logger.parser('encoding are different')
                    logger.parser('[%s]' % (od(pack1)))
                    logger.parser('[%s]' % (od(pack2)))
                    return False
                else:
                    logger.parser('encoding is fine')
                    logger.parser('----------------------------------------')

                logger.parser('JSON nlri %s' % change1.nlri.json())
                logger.parser('JSON attr %s' % change1.attributes.json())

            except Notify, exc:
                logger.parser('----------------------------------------')
                logger.parser(str(exc))
                logger.parser('----------------------------------------')
                return False
        neighbor.rib.clear()
Ejemplo n.º 47
0
class Processes(object):
    """Manage the external helper processes forked by exabgp (Python 2 API).

    Tracks one subprocess per configured process name, the encoder used to
    talk to it (JSON or Text), the API events it subscribed to, and how often
    it was respawned (rate-limited via ``respawn_timemask``).

    NOTE(review): ``terminate`` calls ``self.write`` which is not defined in
    this class as shown — presumably provided elsewhere; confirm.
    """

    # how many time can a process can respawn in the time interval
    respawn_number = 5
    respawn_timemask = 0xFFFFFF - pow(
        2, 6) + 1  # '0b111111111111111111000000' (around a minute, 63 seconds)

    # message-id -> handler storage used by the API dispatch mechanism
    _dispatch = {}

    # names = {
    # 	Message.ID.NOTIFICATION  : 'neighbor-changes',
    # 	Message.ID.OPEN          : 'receive-opens',
    # 	Message.ID.KEEPALIVE     : 'receive-keepalives',
    # 	Message.ID.UPDATE        : 'receive-updates',
    # 	Message.ID.ROUTE_REFRESH : 'receive-refresh',
    # 	Message.ID.OPERATIONAL   : 'receive-operational',
    # }

    def __init__(self, reactor):
        """Bind the reactor and reset all per-process state."""
        self.logger = Logger()
        self.reactor = reactor
        self.clean()
        self.silence = False

        # local import to avoid a circular dependency at module load time
        from exabgp.configuration.environment import environment
        self.highres = environment.settings().api.highres

    def clean(self):
        """Reset every per-process table to its empty state."""
        self._process = {}
        self._encoder = {}
        self._events = {}
        self._neighbor_process = {}
        self._broken = []
        self._respawning = {}

    def _terminate(self, process):
        """Terminate, reap and forget one child process by name."""
        self.logger.processes("Terminating process %s" % process)
        try:
            self._process[process].terminate()
        except OSError:
            # the process is most likely already dead
            pass
        self._process[process].wait()
        del self._process[process]

    def terminate(self):
        """Politely ask every helper to shut down, then terminate them all."""
        for process in list(self._process):
            if not self.silence:
                try:
                    self.write(process, self._encoder[process].shutdown())
                except ProcessError:
                    pass
        self.silence = True
        # give the children a moment to read the shutdown message
        time.sleep(0.1)
        for process in list(self._process):
            try:
                self._terminate(process)
            except OSError:
                # we most likely received a SIGTERM signal and our child is already dead
                self.logger.processes("child process %s was already dead" %
                                      process)
                pass
        self.clean()

    def _start(self, process):
        """Fork one configured helper process and record its respawn rate."""
        # register which API events this process subscribed to
        events = self.reactor.configuration.process[process]
        for event, present in events.iteritems():
            if event in ('run', 'encoder'):
                continue
            if present:
                self._events.setdefault(process, []).append(event)

        try:
            if process in self._process:
                self.logger.processes("process already running")
                return
            if not process in self.reactor.configuration.process:
                self.logger.processes(
                    "Can not start process, no configuration for it (anymore ?)"
                )
                return

            # Prevent some weird termcap data to be created at the start of the PIPE
            # \x1b[?1034h (no-eol) (esc)
            os.environ['TERM'] = 'dumb'

            run = self.reactor.configuration.process[process].get('run', '')
            if run:
                api = self.reactor.configuration.process[process]['encoder']
                self._encoder[process] = JSON(
                    '3.4.6', self.highres) if api == 'json' else Text('3.3.2')

                self._process[process] = subprocess.Popen(
                    run,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    preexec_fn=preexec_helper
                    # This flags exists for python 2.7.3 in the documentation but on on my MAC
                    # creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
                )
                # make the child's stdout non-blocking so received() can poll it
                fcntl.fcntl(self._process[process].stdout.fileno(),
                            fcntl.F_SETFL, os.O_NONBLOCK)

                self.logger.processes("Forked process %s" % process)

                # count respawns per ~minute bucket; too many means a broken helper
                around_now = int(time.time()) & self.respawn_timemask
                if process in self._respawning:
                    if around_now in self._respawning[process]:
                        self._respawning[process][around_now] += 1
                        # we are respawning too fast
                        if self._respawning[process][
                                around_now] > self.respawn_number:
                            self.logger.processes(
                                "Too many respawn for %s (%d) terminating program"
                                % (process, self.respawn_number), 'critical')
                            raise ProcessError()
                    else:
                        # reset long time since last respawn
                        self._respawning[process] = {around_now: 1}
                else:
                    # record respawing
                    self._respawning[process] = {around_now: 1}

            neighbor = self.reactor.configuration.process[process]['neighbor']
            self._neighbor_process.setdefault(neighbor, []).append(process)
        except (subprocess.CalledProcessError, OSError, ValueError), e:
            self._broken.append(process)
            self.logger.processes("Could not start process %s" % process)
            self.logger.processes("reason: %s" % str(e))
Ejemplo n.º 48
0
def check_update(neighbor, raw):
    """Decode the BGP UPDATE message(s) in *raw* against *neighbor*'s session.

    Builds a fake negotiated session (two OPENs derived from the neighbor's
    configuration with distinct router-ids), then parses every UPDATE found
    in the byte string, logging each decoded route and its JSON form.

    Python 2 only: *raw* is a byte string indexed with ord().

    :param neighbor: dict of configured neighbors (the first one is used)
    :param raw: wire bytes, either full BGP messages (16 x 0xFF marker) or a
        single headerless UPDATE body
    :return: True when everything decoded, False on any parsing error
    """
    logger = Logger()
    logger._option.parser = True
    logger.parser('\ndecoding routes in configuration')

    # py2: dict.keys() is a list, pick the first (only) neighbor
    neighbor = neighbor[neighbor.keys()[0]]

    path = {}
    for f in NLRI.known_families():
        if neighbor.add_path:
            path[f] = neighbor.add_path

    capa = Capabilities().new(neighbor, False)
    capa[Capability.CODE.ADD_PATH] = path
    capa[Capability.CODE.MULTIPROTOCOL] = neighbor.families()
    # capa[Capability.CODE.FOUR_BYTES_ASN] = True

    routerid_1 = str(neighbor.router_id)
    # derive a distinct router-id for the fake peer
    routerid_2 = '.'.join(
        str((int(_) + 1) % 250)
        for _ in str(neighbor.router_id).split('.', -1))

    o1 = Open(Version(4), ASN(neighbor.local_as), HoldTime(180),
              RouterID(routerid_1), capa)
    o2 = Open(Version(4), ASN(neighbor.peer_as), HoldTime(180),
              RouterID(routerid_2), capa)
    negotiated = Negotiated(neighbor)
    negotiated.sent(o1)
    negotiated.received(o2)
    # grouped = False

    while raw:
        if raw.startswith('\xff' * 16):
            # RFC 4271 header: 16 marker bytes, 2-byte length, 1-byte type
            kind = ord(raw[18])
            # BUG FIX: the high length octet must be shifted by 8, not 16 -
            # the old code mis-sized any message longer than 255 bytes
            size = (ord(raw[16]) << 8) + (ord(raw[17]))

            injected, raw = raw[19:size], raw[size:]

            if kind == 2:
                logger.parser('the message is an update')
                decoding = 'update'
            else:
                logger.parser('the message is not an update (%d) - aborting' %
                              kind)
                return False
        else:
            logger.parser(
                'header missing, assuming this message is ONE update')
            decoding = 'update'
            injected, raw = raw, ''

        try:
            # This does not take the BGP header - let's assume we will not break that :)
            update = Update.unpack_message(injected, negotiated)
        except KeyboardInterrupt:
            raise
        except Notify:
            logger.parser('could not parse the message')
            logger.parser(traceback.format_exc())
            return False
        except StandardError:
            logger.parser('could not parse the message')
            logger.parser(traceback.format_exc())
            return False

        logger.parser('')  # new line
        for number in range(len(update.nlris)):
            change = Change(update.nlris[number], update.attributes)
            logger.parser('decoded %s %s %s' %
                          (decoding, change.nlri.action, change.extensive()))
        logger.parser(
            'update json %s' %
            Response.JSON(json_version).update(neighbor, 'in', update, '', ''))

    return True
Ejemplo n.º 49
0
def run(env, comment, configurations, pid=0):
    """Run the BGP reactor, optionally under a profiler.

    :param env: environment settings (profile and debug sections are used)
    :param comment: optional configuration comment logged at startup
    :param configurations: configurations handed to the Reactor
    :param pid: when non-zero, suffix the profile output file with the pid
    """
    logger = Logger()

    if comment:
        logger.configuration(comment)

    # fast path: no profiling requested
    if not env.profile.enable:
        ok = Reactor(configurations).run()
        __exit(env.debug.memory, 0 if ok else 1)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if not env.profile.file or env.profile.file == 'stdout':
        # BUG FIX: profile.run() always returns None (and exec's its command
        # in __main__, where Reactor may not even be bound), so the previous
        # `ok = profile.run(...)` made this branch always exit with status 1.
        # Drive the profiler explicitly, as the file-output branch does.
        profiler = profile.Profile()
        profiler.enable()
        ok = False  # BUG FIX: `ok` must exist if Reactor raises (see finally)
        try:
            ok = Reactor(configurations).run()
        finally:
            profiler.disable()
            profiler.print_stats()
        __exit(env.debug.memory, 0 if ok else 1)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    # refuse to overwrite an existing path with the profile output
    # BUG FIX: the directory message had its logic inverted ('is not a
    # directory' when the path IS one) and said 'outpout'
    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        logger.reactor('profiling ....')
        profiler = profile.Profile()
        profiler.enable()
        ok = False  # BUG FIX: `ok` must exist if Reactor raises (see finally)
        try:
            ok = Reactor(configurations).run()
        finally:
            profiler.disable()
            kprofile = lsprofcalltree.KCacheGrind(profiler)

            with open(profile_name, 'w+') as write:
                kprofile.output(write)

            __exit(env.debug.memory, 0 if ok else 1)
    else:
        logger.reactor("-" * len(notice))
        logger.reactor(notice)
        logger.reactor("-" * len(notice))
        Reactor(configurations).run()
        __exit(env.debug.memory, 1)
Ejemplo n.º 50
0
	def messages (self, negotiated, include_withdraw=True):
		"""Yield the wire-encoded UPDATE message(s) for this update.

		NLRIs whose family was negotiated are packed into the legacy IPv4
		withdraw/announce UPDATE sections when possible, and into
		MP_REACH_NLRI / MP_UNREACH_NLRI attributes otherwise, splitting the
		routes over several messages whenever the negotiated maximum message
		size would be exceeded.

		:param negotiated: negotiated session (msg_size, families, ...)
		:param include_withdraw: when False, withdrawn routes are not encoded
		"""
		# sort the nlris

		nlris = []
		mp_nlris = {}

		# IPv4 unicast/multicast with an IPv4 nexthop can use the legacy
		# UPDATE fields; everything else needs the MP attributes
		for nlri in sorted(self.nlris):
			if nlri.family() in negotiated.families:
				if nlri.afi == AFI.ipv4 and nlri.safi in [SAFI.unicast, SAFI.multicast] and nlri.nexthop.afi == AFI.ipv4:
					nlris.append(nlri)
				else:
					mp_nlris.setdefault(nlri.family(), {}).setdefault(nlri.action, []).append(nlri)

		if not nlris and not mp_nlris:
			return

		attr = self.attributes.pack(negotiated, True)

		# Withdraws/NLRIS (IPv4 unicast and multicast)
		# 19 is the BGP header size
		msg_size = negotiated.msg_size - 19 - 2 - 2 - len(attr)  # 2 bytes for each of the two prefix() header

		if msg_size < 0:
			# raise Notify(6,0,'attributes size is so large we can not even pack one NLRI')
			Logger().critical('attributes size is so large we can not even pack one NLRI','parser')
			return

		if msg_size == 0 and (nlris or mp_nlris):
			# raise Notify(6,0,'attributes size is so large we can not even pack one NLRI')
			Logger().critical('attributes size is so large we can not even pack one NLRI','parser')
			return

		withdraws = b''
		announced = b''
		for nlri in nlris:
			packed = nlri.pack(negotiated)
			# accumulate while the message still fits
			if len(announced + withdraws + packed) <= msg_size:
				if nlri.action == OUT.ANNOUNCE:
					announced += packed
				elif include_withdraw:
					withdraws += packed
				continue

			# current message is full: flush it and start a fresh one
			if not withdraws and not announced:
				# raise Notify(6,0,'attributes size is so large we can not even pack one NLRI')
				Logger().critical('attributes size is so large we can not even pack one NLRI','parser')
				return

			# attributes are only sent when something is announced
			if announced:
				yield self._message(Update.prefix(withdraws) + Update.prefix(attr) + announced)
			else:
				yield self._message(Update.prefix(withdraws) + Update.prefix(b'') + announced)

			if nlri.action == OUT.ANNOUNCE:
				announced = packed
				withdraws = b''
			elif include_withdraw:
				withdraws = packed
				announced = b''
			else:
				withdraws = b''
				announced = b''

		# flush whatever remains of the legacy IPv4 section
		if announced or withdraws:
			if announced:
				yield self._message(Update.prefix(withdraws) + Update.prefix(attr) + announced)
			else:
				yield self._message(Update.prefix(withdraws) + Update.prefix(b'') + announced)

		# now the families which require MP_REACH_NLRI / MP_UNREACH_NLRI
		for family in mp_nlris.keys():
			afi, safi = family
			mp_reach = b''
			mp_unreach = b''
			mp_announce = MPRNLRI(afi, safi, mp_nlris[family].get(OUT.ANNOUNCE, []))
			mp_withdraw = MPURNLRI(afi, safi, mp_nlris[family].get(OUT.WITHDRAW, []))

			# each packed_attributes chunk fits the remaining message space;
			# flush the previous chunk as soon as a new one is produced
			for mprnlri in mp_announce.packed_attributes(negotiated, msg_size - len(withdraws + announced)):
				if mp_reach:
					yield self._message(Update.prefix(withdraws) + Update.prefix(attr + mp_reach) + announced)
					announced = b''
					withdraws = b''
				mp_reach = mprnlri

			if include_withdraw:
				for mpurnlri in mp_withdraw.packed_attributes(negotiated, msg_size - len(withdraws + announced + mp_reach)):
					if mp_unreach:
						yield self._message(Update.prefix(withdraws) + Update.prefix(attr + mp_unreach + mp_reach) + announced)
						mp_reach = b''
						announced = b''
						withdraws = b''
					mp_unreach = mpurnlri

			yield self._message(Update.prefix(withdraws) + Update.prefix(attr + mp_unreach + mp_reach) + announced)  # yield mpr/mpur per family
			withdraws = b''
			announced = b''
Ejemplo n.º 51
0
 def __init__(self, reactor):
     """Bind the reactor, set up logging and start from an empty configuration."""
     self.logger = Logger()
     self.configuration = Configuration([])
     self.reactor = reactor
Ejemplo n.º 52
0
def run(env, comment, configuration, pid=0):
    """Run the BGP reactor, optionally under a profiler.

    :param env: environment settings (profile and debug sections are used)
    :param comment: optional configuration comment logged at startup
    :param configuration: configuration handed to the Reactor
    :param pid: when non-zero, suffix the profile output file with the pid
    """
    from exabgp.logger import Logger
    logger = Logger()

    if comment:
        logger.configuration(comment)

    # fast path: no profiling requested
    if not env.profile.enable:
        Reactor(configuration).run()
        __exit(env.debug.memory, 0)

    try:
        import cProfile as profile
    except ImportError:
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing cProfile should
        # trigger the pure-python fallback
        import profile

    if not env.profile.file or env.profile.file == 'stdout':
        # NOTE(review): profile.run exec's its command in __main__ -
        # Reactor must be bound there for this to work; confirm
        profile.run('Reactor(configuration).run()')
        __exit(env.debug.memory, 0)

    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    # refuse to overwrite an existing path with the profile output
    # BUG FIX: the directory message had its logic inverted ('is not a
    # directory' when the path IS one) and said 'outpout'
    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        logger.profile('profiling ....')
        profile.run('Reactor(configuration).run()', filename=profile_name)
        __exit(env.debug.memory, 0)
    else:
        logger.profile("-" * len(notice))
        logger.profile(notice)
        logger.profile("-" * len(notice))
        Reactor(configuration).run()
        __exit(env.debug.memory, 0)
Ejemplo n.º 53
0
 def __init__(self, reactor):
     """Bind the reactor, initialise logging and reset the process tables."""
     self.reactor = reactor
     self.logger = Logger()
     self.silence = False
     self.clean()
Ejemplo n.º 54
0
        env.debug.memory = True

    configurations = []
    # check the file only once that we have parsed all the command line options and allowed them to run
    if options.configuration:
        for f in options.configuration:
            configurations.append(os.path.realpath(os.path.normpath(f)))
    else:
        parser.print_help()
        print '\nno configuration file provided'
        sys.exit(1)

    for configuration in configurations:
        if not os.path.isfile(configuration):
            from exabgp.logger import Logger
            logger = Logger()
            logger.configuration(
                'the argument passed as configuration is not a file', 'error')
            sys.exit(1)

    from exabgp.bgp.message.update.attribute.nexthop import NextHop
    NextHop.caching = env.cache.nexthops

    from exabgp.bgp.message.update.attribute.communities import Community
    Community.caching = env.cache.attributes

    if len(configurations) == 1:
        run(env, comment, configuration)

    if not (env.log.destination in ('syslog', 'stdout', 'stderr')
            or env.log.destination.startswith('host:')):
Ejemplo n.º 55
0
class Daemon(object):
    """Daemon housekeeping: pidfile creation/removal and liveness checks."""

    def __init__(self, reactor):
        # daemon related settings all come from the environment singleton
        self.pid = environment.settings().daemon.pid
        self.user = environment.settings().daemon.user
        self.daemonize = environment.settings().daemon.daemonize
        self.umask = environment.settings().daemon.umask

        self.logger = Logger()

        self.reactor = reactor

        # daemons traditionally run from / with a known umask
        os.chdir('/')
        os.umask(self.umask)

    def check_pid(self, pid):
        """Return True when *pid* names a process that is still alive."""
        if pid <= 0:
            # negative pids are user error, pid 0 would address every process
            return False
        try:
            os.kill(pid, 0)
        except OSError as err:
            # EPERM means the process exists but belongs to someone else;
            # ESRCH (and anything else) means no such process
            return err.errno == errno.EPERM
        return True

    def savepid(self):
        """Record our pid in the configured pidfile; return True on success."""
        self._saved_pid = False

        if not self.pid:
            # no pidfile configured: nothing to do, report success
            return True

        ownid = os.getpid()

        flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
        mode = ((os.R_OK | os.W_OK) << 6) | (os.R_OK << 3) | os.R_OK

        try:
            fd = os.open(self.pid, flags, mode)
        except OSError:
            # the file exists: is the recorded process still running ?
            try:
                recorded = open(self.pid, 'r').readline().strip()
                if self.check_pid(int(recorded)):
                    self.logger.daemon(
                        "PIDfile already exists and program still running %s" %
                        self.pid)
                    return False
                # stale pidfile: retry without O_EXCL so we can overwrite it
                fd = os.open(self.pid, flags ^ os.O_EXCL, mode)
            except (OSError, IOError, ValueError):
                pass

        try:
            handle = os.fdopen(fd, 'w')
            handle.write("%d\n" % ownid)
            handle.close()
            self._saved_pid = True
        except IOError:
            self.logger.daemon("Can not create PIDfile %s" % self.pid,
                               'warning')
            return False
        self.logger.daemon(
            "Created PIDfile %s with value %d" % (self.pid, ownid), 'warning')
        return True

    def removepid(self):
        """Delete the pidfile we created; a missing file is not an error."""
        if not self.pid or not self._saved_pid:
            return
        try:
            os.remove(self.pid)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                self.logger.daemon("Can not remove PIDfile %s" % self.pid,
                                   'error')
                return
            # already gone: fall through and log the removal anyway
        self.logger.daemon("Removed PIDfile %s" % self.pid)
Ejemplo n.º 56
0
class Processes(object):
    """Manage the external (API) helper processes forked by exabgp.

    Starts/terminates the configured helper programs, restarts them when
    they misbehave (rate-limited via ``respawn_timemask``), reads API
    commands from their stdout and writes encoded BGP events to their stdin.
    """

    # how many time can a process can respawn in the time interval
    respawn_timemask = 0xFFFFFF - 0b111111
    # '0b111111111111111111000000' (around a minute, 63 seconds)

    # message-id -> handler, filled by the register_process decorator below
    _dispatch = {}

    def __init__(self):
        """Initialise logging, empty process tables and API settings."""
        self.logger = Logger()
        self.clean()
        self.silence = False
        self._buffer = {}
        self._configuration = {}

        # respawn is effectively disabled when the api does not request it
        self.respawn_number = 5 if environment.settings().api.respawn else 0
        self.terminate_on_error = environment.settings().api.terminate
        self.ack = environment.settings().api.ack

    def number(self):
        """Return the number of currently running helper processes."""
        return len(self._process)

    def clean(self):
        """Reset every per-process table to its empty state."""
        self.fds = []
        self._process = {}
        self._encoder = {}
        self._broken = []
        self._respawning = {}

    def _handle_problem(self, process):
        """React to a misbehaving process: respawn it if allowed, else stop it."""
        if process not in self._process:
            return
        if self.respawn_number:
            self.logger.debug('issue with the process, restarting it',
                              'process')
            self._terminate(process)
            self._start(process)
        else:
            self.logger.debug('issue with the process, terminating it',
                              'process')
            self._terminate(process)

    def _terminate(self, process_name):
        """Forget the process and reap it in a background thread (returned)."""
        self.logger.debug('terminating process %s' % process_name, 'process')
        process = self._process[process_name]
        del self._process[process_name]
        self._update_fds()
        thread = Thread(target=self._terminate_run, args=(process, ))
        thread.start()
        return thread

    def _terminate_run(self, process):
        """Thread body: terminate and wait on one subprocess."""
        try:
            process.terminate()
            process.wait()
        except (OSError, KeyError):
            # the process is most likely already dead
            pass

    def terminate(self):
        """Politely ask every helper to shut down, then reap them all."""
        for process in list(self._process):
            if not self.silence:
                try:
                    self.write(process, self._encoder[process].shutdown())
                except ProcessError:
                    pass
        self.silence = True
        # waiting a little to make sure IO is flushed to the pipes
        # we are using unbuffered IO but still ..
        time.sleep(0.1)
        for process in list(self._process):
            try:
                t = self._terminate(process)
                t.join()
            except OSError:
                # we most likely received a SIGTERM signal and our child is already dead
                self.logger.debug(
                    'child process %s was already dead' % process, 'process')
        self.clean()

    def _start(self, process):
        """Fork one configured helper process, tracking its respawn rate."""
        try:
            if process in self._process:
                self.logger.debug('process already running', 'process')
                return
            if process not in self._configuration:
                self.logger.debug(
                    'can not start process, no configuration for it',
                    'process')
                return
            # Prevent some weird termcap data to be created at the start of the PIPE
            # \x1b[?1034h (no-eol) (esc)
            os.environ['TERM'] = 'dumb'

            configuration = self._configuration[process]

            run = configuration.get('run', '')
            if run:
                api = configuration.get('encoder', '')
                self._encoder[process] = Response.Text(
                    text_version) if api == 'text' else Response.JSON(
                        json_version)

                self._process[process] = subprocess.Popen(
                    run,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    preexec_fn=preexec_helper
                    # This flags exists for python 2.7.3 in the documentation but on on my MAC
                    # creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
                )
                self._update_fds()
                # non-blocking stdout so received() can poll without hanging
                fcntl.fcntl(self._process[process].stdout.fileno(),
                            fcntl.F_SETFL, os.O_NONBLOCK)

                self.logger.debug('forked process %s' % process, 'process')

                # count respawns per ~minute bucket; too many means broken
                around_now = int(time.time()) & self.respawn_timemask
                if process in self._respawning:
                    if around_now in self._respawning[process]:
                        self._respawning[process][around_now] += 1
                        # we are respawning too fast
                        if self._respawning[process][
                                around_now] > self.respawn_number:
                            self.logger.critical(
                                'Too many death for %s (%d) terminating program'
                                % (process, self.respawn_number), 'process')
                            raise ProcessError()
                    else:
                        # reset long time since last respawn
                        self._respawning[process] = {around_now: 1}
                else:
                    # record respawing
                    self._respawning[process] = {around_now: 1}

        except (subprocess.CalledProcessError, OSError, ValueError) as exc:
            self._broken.append(process)
            self.logger.debug('could not start process %s' % process,
                              'process')
            self.logger.debug('reason: %s' % str(exc), 'process')

    def start(self, configuration, restart=False):
        """Adopt *configuration*: stop removed processes, (re)start the rest."""
        for process in list(self._process):
            if process not in configuration:
                self._terminate(process)
        self._configuration = configuration
        for process in configuration:
            if restart and process in list(self._process):
                self._terminate(process)
            self._start(process)

    def broken(self, neighbor):
        """Return True when any configured process failed to start."""
        if self._broken:
            for process in self._configuration:
                if process in self._broken:
                    return True
        return False

    def _update_fds(self):
        """Refresh the list of child stdout file descriptors to poll."""
        self.fds = [
            self._process[process].stdout.fileno() for process in self._process
        ]

    def received(self):
        """Generator yielding (process, command) for each full line read
        from the helpers' stdout; handles dead/broken children on the way."""
        consumed_data = False

        for process in list(self._process):
            try:
                proc = self._process[process]
                poll = proc.poll()
                # proc.poll returns None if the process is still fine
                # -[signal], like -15, if the process was terminated
                if poll is not None:
                    self._handle_problem(process)
                    return

                poller = select.poll()
                poller.register(
                    proc.stdout, select.POLLIN | select.POLLPRI
                    | select.POLLHUP | select.POLLNVAL | select.POLLERR)

                ready = False
                for _, event in poller.poll(0):
                    if event & select.POLLIN or event & select.POLLPRI:
                        ready = True
                    elif event & select.POLLHUP:
                        raise KeyboardInterrupt()
                    elif event & select.POLLERR or event & select.POLLNVAL:
                        self._handle_problem(process)

                if not ready:
                    continue

                try:
                    # Calling next() on Linux and OSX works perfectly well
                    # but not on OpenBSD where it always raise StopIteration
                    # and only readline() works
                    buf = str_ascii(proc.stdout.read(16384))
                    if buf == '' and poll is not None:
                        # if proc.poll() is None then
                        # process is fine, we received an empty line because
                        # we're doing .readline() on a non-blocking pipe and
                        # the process maybe has nothing to send yet
                        self._handle_problem(process)
                        continue

                    raw = self._buffer.get(process, '') + buf

                    # yield every complete line, keep the partial tail buffered
                    while '\n' in raw:
                        line, raw = raw.split('\n', 1)
                        line = line.rstrip()
                        consumed_data = True
                        self.logger.debug(
                            'command from process %s : %s ' % (process, line),
                            'process')
                        yield (process, formated(line))

                    self._buffer[process] = raw

                except IOError as exc:
                    if not exc.errno or exc.errno in error.fatal:
                        # if the program exits we can get an IOError with errno code zero !
                        self._handle_problem(process)
                    elif exc.errno in error.block:
                        # we often see errno.EINTR: call interrupted and
                        # we most likely have data, we will try to read them a the next loop iteration
                        pass
                    else:
                        self.logger.debug(
                            'unexpected errno received from forked process (%s)'
                            % errstr(exc), 'process')
                except StopIteration:
                    if not consumed_data:
                        self._handle_problem(process)
            except KeyError:
                pass
            except (subprocess.CalledProcessError, OSError, ValueError):
                self._handle_problem(process)

    def write(self, process, string, neighbor=None):
        """Send *string* (plus newline) to the process' stdin and flush."""
        if string is None:
            return True

        # XXX: FIXME: This is potentially blocking
        while True:
            try:
                self._process[process].stdin.write(bytes_ascii('%s\n' %
                                                               string))
            except IOError as exc:
                # NOTE(review): on EPIPE the process is appended to _broken
                # twice (here and below) - looks unintentional; confirm
                self._broken.append(process)
                if exc.errno == errno.EPIPE:
                    self._broken.append(process)
                    self.logger.debug(
                        'issue while sending data to our helper program',
                        'process')
                    raise ProcessError()
                else:
                    # Could it have been caused by a signal ? What to do.
                    self.logger.debug(
                        'error received while sending data to helper program, retrying (%s)'
                        % errstr(exc), 'process')
                    continue
            break

        try:
            self._process[process].stdin.flush()
        except IOError as exc:
            # AFAIK, the buffer should be flushed at the next attempt.
            self.logger.debug(
                'error received while FLUSHING data to helper program, retrying (%s)'
                % errstr(exc), 'process')

        return True

    def _answer(self, service, string, force=False):
        """Send an API answer to *service* when forced or when acks are on."""
        if force or self.ack:
            self.logger.debug(
                'responding to %s : %s' %
                (service, string.replace('\n', '\\n')), 'process')
            self.write(service, string)

    def answer_done(self, service):
        """Acknowledge successful completion of a command."""
        self._answer(service, Answer.done)

    def answer_error(self, service):
        """Report a failed command back to the caller."""
        self._answer(service, Answer.error)

    def _notify(self, neighbor, event):
        """Yield every process subscribed to *event* for this neighbor."""
        for process in neighbor.api[event]:
            yield process

    # do not do anything if silenced
    # no-self-argument

    def silenced(function):
        # decorator: skip the wrapped API callback while self.silence is set
        def closure(self, *args):
            if self.silence:
                return
            return function(self, *args)

        return closure

    # invalid-name
    @silenced
    def up(self, neighbor):
        """Notify subscribers that the session with *neighbor* is up."""
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(process, self._encoder[process].up(neighbor), neighbor)

    @silenced
    def connected(self, neighbor):
        """Notify subscribers that the TCP connection is established."""
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(process, self._encoder[process].connected(neighbor),
                       neighbor)

    @silenced
    def down(self, neighbor, reason):
        """Notify subscribers that the session went down, with *reason*."""
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(process, self._encoder[process].down(neighbor, reason),
                       neighbor)

    @silenced
    def negotiated(self, neighbor, negotiated):
        """Forward the negotiated session parameters to subscribers."""
        for process in self._notify(neighbor, 'negotiated'):
            self.write(process,
                       self._encoder[process].negotiated(neighbor,
                                                         negotiated), neighbor)

    @silenced
    def fsm(self, neighbor, fsm):
        """Forward a BGP FSM state change to subscribers."""
        for process in self._notify(neighbor, 'fsm'):
            self.write(process, self._encoder[process].fsm(neighbor, fsm),
                       neighbor)

    @silenced
    def signal(self, neighbor, signal):
        """Forward a received signal to subscribers."""
        for process in self._notify(neighbor, 'signal'):
            self.write(process,
                       self._encoder[process].signal(neighbor,
                                                     signal), neighbor)

    @silenced
    def packets(self, neighbor, direction, category, header, body):
        """Forward raw packet data for the given direction to subscribers."""
        for process in self._notify(neighbor, '%s-packets' % direction):
            self.write(
                process,
                self._encoder[process].packets(neighbor, direction, category,
                                               header, body), neighbor)

    @silenced
    def notification(self, neighbor, direction, code, subcode, data, header,
                     body):
        """Forward a BGP NOTIFICATION message to subscribers."""
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(
                process,
                self._encoder[process].notification(neighbor, direction, code,
                                                    subcode, data, header,
                                                    body), neighbor)

    @silenced
    def message(self, message_id, neighbor, direction, message, negotiated,
                header, *body):
        """Dispatch a BGP message to the handler registered for its id."""
        self._dispatch[message_id](self, neighbor, direction, message,
                                   negotiated, header, *body)

    # registering message functions
    # no-self-argument

    def register_process(message_id, storage=_dispatch):
        # decorator factory: record the wrapped handler under message_id
        def closure(function):
            def wrap(*args):
                function(*args)

            storage[message_id] = wrap
            return wrap

        return closure

    # notifications are handled in the loop as they use different arguments

    @register_process(Message.CODE.OPEN)
    def _open(self, peer, direction, message, negotiated, header, body):
        for process in self._notify(
                peer, '%s-%s' % (direction, Message.CODE.OPEN.SHORT)):
            self.write(
                process, self._encoder[process].open(peer, direction, message,
                                                     negotiated, header, body),
                peer)

    @register_process(Message.CODE.UPDATE)
    def _update(self, peer, direction, update, negotiated, header, body):
        for process in self._notify(
                peer, '%s-%s' % (direction, Message.CODE.UPDATE.SHORT)):
            self.write(
                process,
                self._encoder[process].update(peer, direction, update,
                                              negotiated, header, body), peer)

    @register_process(Message.CODE.NOTIFICATION)
    def _notification(self, peer, direction, message, negotiated, header,
                      body):
        for process in self._notify(
                peer, '%s-%s' % (direction, Message.CODE.NOTIFICATION.SHORT)):
            self.write(
                process,
                self._encoder[process].notification(peer, direction, message,
                                                    negotiated, header, body),
                peer)

    # unused-argument, must keep the API
    @register_process(Message.CODE.KEEPALIVE)
    def _keepalive(self, peer, direction, keepalive, negotiated, header, body):
        for process in self._notify(
                peer, '%s-%s' % (direction, Message.CODE.KEEPALIVE.SHORT)):
            self.write(
                process,
                self._encoder[process].keepalive(peer, direction, negotiated,
                                                 header, body), peer)

    @register_process(Message.CODE.ROUTE_REFRESH)
    def _refresh(self, peer, direction, refresh, negotiated, header, body):
        for process in self._notify(
                peer, '%s-%s' % (direction, Message.CODE.ROUTE_REFRESH.SHORT)):
            self.write(
                process,
                self._encoder[process].refresh(peer, direction, refresh,
                                               negotiated, header, body), peer)

    @register_process(Message.CODE.OPERATIONAL)
    def _operational(self, peer, direction, operational, negotiated, header,
                     body):
        for process in self._notify(
                peer, '%s-%s' % (direction, Message.CODE.OPERATIONAL.SHORT)):
            self.write(
                process,
                self._encoder[process].operational(peer, direction,
                                                   operational.category,
                                                   operational, negotiated,
                                                   header, body), peer)
Ejemplo n.º 57
0
def run(env, comment, configurations, root, validate, pid=0):
    """Boot ExaBGP: log the banner, set up the CLI named pipes, then run the
    Reactor -- directly, or wrapped in a profiler when profiling is enabled.

    :param env:            parsed environment namespace (api, profile, debug, ...)
    :param comment:        optional advisory message logged at startup
    :param configurations: list of configuration file paths for the Reactor
    :param root:           installation root folder
    :param validate:       when set, the Reactor only validates the configuration
    :param pid:            child PID used to suffix the profile output file
                           (0 when the caller did not fork)

    This function does not return on the normal paths: it terminates the
    process through __exit().
    """
    logger = Logger()

    # startup banner
    logger.notice('Thank you for using ExaBGP', 'welcome')
    logger.notice('%s' % version, 'version')
    logger.notice('%s' % sys.version.replace('\n', ' '), 'interpreter')
    logger.notice('%s' % ' '.join(platform.uname()[:5]), 'os')
    logger.notice('%s' % root, 'installation')

    if comment:
        logger.notice(comment, 'advice')

    warning = warn()
    if warning:
        logger.warning(warning, 'advice')

    if env.api.cli:
        pipename = 'exabgp' if env.api.pipename is None else env.api.pipename
        pipes = named_pipe(root, pipename)
        if len(pipes) != 1:
            # NOTE(review): on failure named_pipe() appears to return the list
            # of scanned folders rather than pipe locations -- confirm.
            env.api.cli = False
            logger.error(
                'could not find the named pipes (%s.in and %s.out) required for the cli'
                % (pipename, pipename), 'cli')
            logger.error(
                'we scanned the following folders (the number is your PID):',
                'cli')
            for location in pipes:
                logger.error(' - %s' % location, 'cli control')
            logger.error(
                'please make them in one of the folder with the following commands:',
                'cli control')
            logger.error(
                '> mkfifo %s/run/%s.{in,out}' % (os.getcwd(), pipename),
                'cli control')
            logger.error(
                '> chmod 600 %s/run/%s.{in,out}' % (os.getcwd(), pipename),
                'cli control')
            if os.getuid() != 0:
                logger.error(
                    '> chown %d:%d %s/run/%s.{in,out}' %
                    (os.getuid(), os.getgid(), os.getcwd(), pipename),
                    'cli control')
        else:
            pipe = pipes[0]
            # exported so the cli subcommand can find its way back to us
            os.environ['exabgp_cli_pipe'] = pipe
            os.environ['exabgp_api_pipename'] = pipename

            logger.info('named pipes for the cli are:', 'cli control')
            logger.info('to send commands  %s%s.in' % (pipe, pipename),
                        'cli control')
            logger.info('to read responses %s%s.out' % (pipe, pipename),
                        'cli control')

    # fast path: no profiling requested
    if not env.profile.enable:
        exit_code = Reactor(configurations).run(validate, root)
        __exit(env.debug.memory, exit_code)

    try:
        import cProfile as profile
    except ImportError:
        import profile

    if env.profile.file == 'stdout':
        profiled = 'Reactor(%s).run(%s,"%s")' % (str(configurations),
                                                 str(validate), str(root))
        # NOTE(review): profile.run() returns None, so exit_code is always
        # None here -- confirm this is the intended exit status.
        exit_code = profile.run(profiled)
        __exit(env.debug.memory, exit_code)

    # one profile file per forked worker
    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file, pid)
    else:
        profile_name = env.profile.file

    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is not a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name

    if not notice:
        cwd = os.getcwd()
        logger.debug('profiling ....', 'reactor')
        profiler = profile.Profile()
        profiler.enable()
        try:
            exit_code = Reactor(configurations).run(validate, root)
        except Exception:
            exit_code = Reactor.Exit.unknown
            raise
        finally:
            # always dump the profile, even when the Reactor raised
            from exabgp.vendoring import lsprofcalltree
            profiler.disable()
            kprofile = lsprofcalltree.KCacheGrind(profiler)
            try:
                destination = profile_name if profile_name.startswith(
                    '/') else os.path.join(cwd, profile_name)
                with open(destination, 'w+') as write:
                    kprofile.output(write)
            except IOError:
                # fixed typo: was 'profiling in formation'
                notice = 'could not save profiling information at: ' + destination
                logger.debug("-" * len(notice), 'reactor')
                logger.debug(notice, 'reactor')
                logger.debug("-" * len(notice), 'reactor')
            __exit(env.debug.memory, exit_code)
    else:
        # the requested profile file is unusable: warn and run unprofiled
        logger.debug("-" * len(notice), 'reactor')
        logger.debug(notice, 'reactor')
        logger.debug("-" * len(notice), 'reactor')
        Reactor(configurations).run(validate, root)
        __exit(env.debug.memory, 1)
# --- Ejemplo n.º 58 ---
def main():
    """Command-line entry point for ExaBGP.

    Parses the docopt options, services the informational flags (--version,
    --help, --decode, --full-ini/--fi, ...), prepares the environment, and
    finally hands over to run() -- once per configuration file, forking a
    worker per configuration when several are given.
    """
    # use the structured version tuple instead of slicing sys.version by
    # character index, which misparses double-digit components (e.g. "3.10")
    if sys.version_info < (2, 5):
        sys.stdout.write(
            'This program can not work (is not tested) with your python version (< 2.5)\n'
        )
        sys.stdout.flush()
        sys.exit(1)

    # when re-invoked from inside the reactor, act as the cli controller
    cli_named_pipe = os.environ.get('exabgp_cli_pipe', '')
    if cli_named_pipe:
        from exabgp.application.control import main as control
        control(cli_named_pipe)
        sys.exit(0)

    options = docopt.docopt(usage, help=False)

    if options["--run"]:
        # everything after --run becomes the sub-program's own argv
        sys.argv = sys.argv[sys.argv.index('--run') + 1:]
        if sys.argv[0] == 'healthcheck':
            from exabgp.application import run_healthcheck
            run_healthcheck()
        elif sys.argv[0] == 'cli':
            from exabgp.application import run_cli
            run_cli()
        else:
            sys.stdout.write(usage)
            sys.stdout.flush()
            sys.exit(0)
        return

    root = root_folder(options, [
        '/bin/exabgp', '/sbin/exabgp', '/lib/exabgp/application/bgp.py',
        '/lib/exabgp/application/control.py'
    ])
    prefix = '' if root == '/usr' else root
    etc = prefix + '/etc/exabgp'

    os.environ['EXABGP_ETC'] = etc  # This is not most pretty

    if options["--version"]:
        sys.stdout.write('ExaBGP : %s\n' % version)
        sys.stdout.write('Python : %s\n' % sys.version.replace('\n', ' '))
        sys.stdout.write('Uname  : %s\n' % ' '.join(platform.uname()[:5]))
        sys.stdout.write('Root   : %s\n' % root)
        sys.stdout.flush()
        sys.exit(0)

    envfile = get_envfile(options, etc)
    env = get_env(envfile)

    # Must be done before setting the logger as it modify its behaviour
    if options["--debug"]:
        env.log.all = True
        env.log.level = syslog.LOG_DEBUG

    logger = Logger()

    from exabgp.configuration.setup import environment

    if options["--decode"]:
        # normalise the hex payload: colons and spaces are cosmetic
        decode = ''.join(options["--decode"]).replace(':', '').replace(' ', '')
        if not is_bgp(decode):
            sys.stdout.write(usage)
            sys.stdout.write('Environment values are:\n%s\n\n' %
                             '\n'.join(' - %s' % _
                                       for _ in environment.default()))
            sys.stdout.write(
                'The BGP message must be an hexadecimal string.\n\n')
            sys.stdout.write(
                'All colons or spaces are ignored, for example:\n\n')
            sys.stdout.write('  --decode 001E0200000007900F0003000101\n')
            sys.stdout.write(
                '  --decode 001E:02:0000:0007:900F:0003:0001:01\n')
            sys.stdout.write(
                '  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101\n'
            )
            sys.stdout.write(
                '  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01\n'
            )
            sys.stdout.write(
                '  --decode \'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101\n\''
            )
            sys.stdout.flush()
            sys.exit(1)
    else:
        decode = ''

    # --signal N: fork, let the child run exabgp, send it SIGUSR1 after N
    # seconds, then wait for it and propagate its exit code
    duration = options["--signal"]
    if duration and duration.isdigit():
        pid = os.fork()
        if pid:
            import time
            import signal
            try:
                time.sleep(int(duration))
                os.kill(pid, signal.SIGUSR1)
            except KeyboardInterrupt:
                pass
            try:
                pid, code = os.wait()
                sys.exit(code)
            except KeyboardInterrupt:
                # a second ^C while waiting: try once more, then give up
                try:
                    pid, code = os.wait()
                    sys.exit(code)
                except Exception:
                    sys.exit(0)

    if options["--help"]:
        sys.stdout.write(usage)
        sys.stdout.write('Environment values are:\n' +
                         '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.stdout.flush()
        sys.exit(0)

    if options["--decode"]:
        env.log.parser = True
        env.debug.route = decode
        env.tcp.bind = ''

    if options["--profile"]:
        env.profile.enable = True
        if options["--profile"].lower() in ['1', 'true']:
            env.profile.file = True
        elif options["--profile"].lower() in ['0', 'false']:
            env.profile.file = False
        else:
            env.profile.file = options["--profile"]

    if envfile and not os.path.isfile(envfile):
        comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
    else:
        comment = ''

    if options["--full-ini"] or options["--fi"]:
        for line in environment.iter_ini():
            sys.stdout.write('%s\n' % line)
            sys.stdout.flush()
        sys.exit(0)

    if options["--full-env"] or options["--fe"]:
        print()
        for line in environment.iter_env():
            sys.stdout.write('%s\n' % line)
            sys.stdout.flush()
        sys.exit(0)

    if options["--diff-ini"] or options["--di"]:
        for line in environment.iter_ini(True):
            sys.stdout.write('%s\n' % line)
            sys.stdout.flush()
        sys.exit(0)

    if options["--diff-env"] or options["--de"]:
        for line in environment.iter_env(True):
            sys.stdout.write('%s\n' % line)
            sys.stdout.flush()
        sys.exit(0)

    if options["--once"]:
        env.tcp.once = True

    if options["--pdb"]:
        # The following may fail on old version of python (but is required for debug.py)
        os.environ['PDB'] = 'true'
        env.debug.pdb = True

    if options["--test"]:
        env.debug.selfcheck = True
        env.log.parser = True

    if options["--memory"]:
        env.debug.memory = True

    configurations = []
    # check the file only once that we have parsed all the command line options and allowed them to run
    if options["<configuration>"]:
        for f in options["<configuration>"]:
            # some users are using symlinks for atomic change of the configuration file
            # using mv may however be better practice :p
            normalised = os.path.realpath(os.path.normpath(f))
            target = os.path.realpath(normalised)
            if os.path.isfile(target):
                configurations.append(normalised)
                continue
            # second chance: resolve a relative 'etc/exabgp/...' path
            # against the installation's etc folder
            if f.startswith('etc/exabgp'):
                normalised = os.path.join(etc, f[11:])
                if os.path.isfile(normalised):
                    configurations.append(normalised)
                    continue

            logger.debug(
                'one of the arguments passed as configuration is not a file (%s)'
                % f, 'configuration')
            sys.exit(1)

    else:
        sys.stdout.write(usage)
        sys.stdout.write('Environment values are:\n%s\n\n' %
                         '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.stdout.write('no configuration file provided')
        sys.stdout.flush()
        sys.exit(1)

    from exabgp.bgp.message.update.attribute import Attribute
    Attribute.caching = env.cache.attributes

    # single configuration (or rotation): run in this process
    # NOTE(review): run() exits the process, so execution stops here -- confirm
    if env.debug.rotate or len(configurations) == 1:
        run(env, comment, configurations, root, options["--validate"])

    # multiple configurations fork, so per-file logging would collide
    if not (env.log.destination in ('syslog', 'stdout', 'stderr')
            or env.log.destination.startswith('host:')):
        logger.error(
            'can not log to files when running multiple configuration (as we fork)',
            'configuration')
        sys.exit(1)

    try:
        # run each configuration in its own process
        pids = []
        for configuration in configurations:
            pid = os.fork()
            if pid == 0:
                run(env, comment, [configuration], root, options["--validate"],
                    os.getpid())
            else:
                pids.append(pid)

        # If we get a ^C / SIGTERM, ignore just continue waiting for our child process
        import signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # wait for the forked processes
        for pid in pids:
            os.waitpid(pid, 0)
    except OSError as exc:
        logger.critical(
            'can not fork, errno %d : %s' % (exc.errno, exc.strerror),
            'reactor')
        sys.exit(1)
# --- Ejemplo n.º 59 ---
    def unpack_message(cls, data, negotiated):
        """Parse the body of a BGP UPDATE message.

        Returns an EOR marker when the payload matches one of the
        End-of-RIB encodings, otherwise an Update built from the parsed
        withdrawn/announced NLRI and path attributes.

        :param data:       the raw UPDATE body (header already stripped)
        :param negotiated: the session capabilities (AddPath, ...)
        """
        logger = Logger()

        length = len(data)

        # This could be speed up massively by changing the order of the IF
        # 4 zero bytes: the IPv4 unicast End-of-RIB marker
        if length == 4 and data == '\x00\x00\x00\x00':
            return EOR(AFI.ipv4, SAFI.unicast, IN.ANNOUNCED)  # pylint: disable=E1101
        # fixed-size MP End-of-RIB encoding
        if length == 11 and data.startswith(EOR.NLRI.PREFIX):
            return EOR.unpack_message(data, negotiated)

        # split the body into its three wire sections
        withdrawn, _attributes, announced = cls.split(data)
        attributes = Attributes.unpack(_attributes, negotiated)

        if not withdrawn:
            logger.parser("no withdrawn NLRI")
        if not announced:
            logger.parser("no announced NLRI")

        # Is the peer going to send us some Path Information with the route (AddPath)
        addpath = negotiated.addpath.receive(AFI(AFI.ipv4), SAFI(SAFI.unicast))

        # empty string for NoIP, the packed IP otherwise (without the 3/4 bytes of attributes headers)
        _nexthop = attributes.get(Attribute.CODE.NEXT_HOP, NoIP)
        nexthop = _nexthop.packed

        # XXX: NEXTHOP MUST NOT be the IP address of the receiving speaker.

        # decode each NLRI in turn, consuming the buffer as we go
        nlris = []
        while withdrawn:
            length, nlri = NLRI.unpack(AFI.ipv4, SAFI.unicast, withdrawn,
                                       addpath, nexthop, IN.WITHDRAWN)
            logger.parser(
                LazyFormat("parsed withdraw nlri %s payload " % nlri,
                           withdrawn[:len(nlri)]))
            withdrawn = withdrawn[length:]
            nlris.append(nlri)

        while announced:
            length, nlri = NLRI.unpack(AFI.ipv4, SAFI.unicast, announced,
                                       addpath, nexthop, IN.ANNOUNCED)
            logger.parser(
                LazyFormat("parsed announce nlri %s payload " % nlri,
                           announced[:len(nlri)]))
            announced = announced[length:]
            nlris.append(nlri)

        # required for 'is' comparaison
        # fresh list sentinels so identity checks below can tell
        # "attribute absent" apart from any real attribute value
        UNREACH = [
            EMPTY_MPURNLRI,
        ]
        REACH = [
            EMPTY_MPRNLRI,
        ]

        unreach = attributes.pop(MPURNLRI.ID, UNREACH)
        reach = attributes.pop(MPRNLRI.ID, REACH)

        # merge the multiprotocol NLRI into the same list
        for mpr in unreach:
            nlris.extend(mpr.nlris)

        for mpr in reach:
            nlris.extend(mpr.nlris)

        # an UPDATE carrying nothing at all is an End-of-RIB signal;
        # work out which address family it applies to
        if not attributes and not nlris:
            # Careful do not use == or != as the comparaison does not work
            if unreach is UNREACH and reach is REACH:
                return EOR(AFI(AFI.ipv4), SAFI(SAFI.unicast))
            if unreach is not UNREACH:
                return EOR(unreach[0].afi, unreach[0].safi)
            if reach is not REACH:
                return EOR(reach[0].afi, reach[0].safi)
            raise RuntimeError('This was not expected')

        return Update(nlris, attributes)
# --- Ejemplo n.º 60 ---
    def unpack_message(cls, data, negotiated):
        """Parse the body of a BGP UPDATE message.

        Returns an EOR marker when the payload matches one of the
        End-of-RIB encodings, otherwise an Update built from the parsed
        withdrawn/announced NLRI and path attributes.

        :param data:       the raw UPDATE body as bytes (header already stripped)
        :param negotiated: the session capabilities (AddPath, ...)
        """
        logger = Logger()

        logger.parser(LazyFormat("parsing UPDATE", data))

        length = len(data)

        # This could be speed up massively by changing the order of the IF
        # 4 zero bytes: the IPv4 unicast End-of-RIB marker
        if length == 4 and data == b'\x00\x00\x00\x00':
            return EOR(AFI.ipv4, SAFI.unicast)  # pylint: disable=E1101
        # fixed-size MP End-of-RIB encoding
        if length == 11 and data.startswith(EOR.NLRI.PREFIX):
            return EOR.unpack_message(data, negotiated)

        # split the body into its three wire sections
        withdrawn, _attributes, announced = cls.split(data)

        if not withdrawn:
            logger.parser("withdrawn NLRI none")

        attributes = Attributes.unpack(_attributes, negotiated)

        if not announced:
            logger.parser("announced NLRI none")

        # Is the peer going to send us some Path Information with the route (AddPath)
        addpath = negotiated.addpath.receive(AFI.ipv4, SAFI.unicast)

        # empty string for NoNextHop, the packed IP otherwise (without the 3/4 bytes of attributes headers)
        nexthop = attributes.get(Attribute.CODE.NEXT_HOP, NoNextHop)
        # nexthop = NextHop.unpack(_nexthop.ton())

        # XXX: NEXTHOP MUST NOT be the IP address of the receiving speaker.

        # decode each NLRI in turn; unpack_nlri returns the remainder buffer
        nlris = []
        while withdrawn:
            nlri, left = NLRI.unpack_nlri(AFI.ipv4, SAFI.unicast, withdrawn,
                                          IN.WITHDRAWN, addpath)
            logger.parser("withdrawn NLRI %s" % nlri)
            withdrawn = left
            nlris.append(nlri)

        while announced:
            nlri, left = NLRI.unpack_nlri(AFI.ipv4, SAFI.unicast, announced,
                                          IN.ANNOUNCED, addpath)
            # announced routes carry the NEXT_HOP from the attributes
            nlri.nexthop = nexthop
            logger.parser("announced NLRI %s" % nlri)
            announced = left
            nlris.append(nlri)

        # pull out the multiprotocol NLRI attributes (None when absent)
        unreach = attributes.pop(MPURNLRI.ID, None)
        reach = attributes.pop(MPRNLRI.ID, None)

        if unreach is not None:
            nlris.extend(unreach.nlris)

        if reach is not None:
            nlris.extend(reach.nlris)

        # an UPDATE carrying nothing at all is an End-of-RIB signal;
        # work out which address family it applies to
        if not attributes and not nlris:
            # Careful do not use == or != as the comparaison does not work
            if unreach is None and reach is None:
                return EOR(AFI.ipv4, SAFI.unicast)
            if unreach is not None:
                return EOR(unreach.afi, unreach.safi)
            if reach is not None:
                return EOR(reach.afi, reach.safi)
            raise RuntimeError('This was not expected')

        return Update(nlris, attributes)