def __init__(self, connection, config, handler=None):
    """Set up the DHCP agent plugin: config, logging, and RPC wiring."""
    # Our own section of the daemon configuration
    self.config = config.section('DHCPAgent')

    # Seconds of idleness before RabbitMQ expires our queue
    expire_secs = int(self.config.get('queue_expire', 60))

    # Plugin-scoped logger
    self.logger = Logger(
        name='dhcpagent',
        level=self.config['loglevel'],
        handler=handler
    )

    # Neutron agent configuration file
    self.qconfig = Config(self.config['conffile'], 'AGENT')

    # Agent-side initialization
    NeutronAgent.__init__(self, self.qconfig, self.config, 'DHCP agent')

    # AMQP exchange/queue wiring; note x-expires is in milliseconds
    exchange_options = {
        'name': self.event_queue(),
        'durable': False,
        'type': 'topic'
    }
    queue_options = {
        'name': 'rpcdaemon-dhcp_%s' % uuid4(),
        'auto_delete': True,
        'durable': False,
        'routing_key': 'q-plugin',
        'queue_arguments': {'x-expires': expire_secs * 1000}
    }
    RPC.__init__(self, connection, exopts=exchange_options, qopts=queue_options)
def __init__(self, connection, plugins=None, handler=None):
    """Consumer thread that fans AMQP messages out to plugin callbacks.

    connection: kombu Connection shared with the plugins.
    plugins:    iterable of plugin objects exposing .queue and .update.
    handler:    optional logging handler for the Logger.
    """
    Thread.__init__(self, target=self.run)  # MRO picks mixin.run
    self.connection = connection
    self.is_connected = True
    # Fix: avoid the shared mutable-default-argument pitfall (plugins=[])
    # — each Worker now gets its own fresh list when none is supplied.
    self.plugins = [] if plugins is None else plugins
    self.queues = [plugin.queue for plugin in self.plugins]
    self.callbacks = [plugin.update for plugin in self.plugins]
    self.logger = Logger(name='consumer', handler=handler)
def __init__(self, connection, config, handler=None):
    """Wire the Dump plugin onto the bus with a unique auto-delete queue."""
    # Our slice of the daemon configuration
    self.config = config.section('Dump')

    # Plugin-scoped logger
    self.logger = Logger(name='dump',
                         level=self.config['loglevel'],
                         handler=handler)

    # Exchange defaults to 'neutron' unless overridden in config
    exchange = self.config.get('queue', 'neutron')
    RPC.__init__(
        self,
        connection,
        exopts={'name': exchange, 'durable': False, 'type': 'topic'},
        qopts={'name': 'rpcdaemon-dump_%s' % uuid4(),
               'auto_delete': True,
               'durable': False,
               'routing_key': 'q-plugin'}
    )
def __init__(self, connection, config, handler=None):
    """Configure logging, agent state, and RPC plumbing for the DHCP plugin."""
    # Grab our section of the daemon configuration
    self.config = config.section("DHCPAgent")

    # Idle-queue lifetime in seconds before the broker drops it
    ttl_seconds = int(self.config.get("queue_expire", 60))

    # Plugin logger
    self.logger = Logger(name="dhcpagent",
                         level=self.config["loglevel"],
                         handler=handler)

    # Neutron agent configuration file
    self.qconfig = Config(self.config["conffile"], "AGENT")

    # Agent-side initialization
    NeutronAgent.__init__(self, self.qconfig, self.config, "DHCP agent")

    # Topic exchange plus a uniquely-named transient queue;
    # x-expires is expressed in milliseconds
    exchange_opts = {
        "name": self.event_queue(),
        "durable": False,
        "type": "topic",
    }
    queue_opts = {
        "name": "rpcdaemon-dhcp_%s" % uuid4(),
        "auto_delete": True,
        "durable": False,
        "routing_key": "q-plugin",
        "queue_arguments": {"x-expires": ttl_seconds * 1000},
    }
    RPC.__init__(self, connection, exopts=exchange_opts, qopts=queue_opts)
class Dump(RPC):
    """Debug plugin: logs every RPC message on the bus, filtered by config."""

    def __init__(self, connection, config, handler=None):
        """Bind a uniquely-named auto-delete queue to the dump exchange."""
        # Our section of the daemon configuration
        self.config = config.section('Dump')

        # Plugin-scoped logger
        self.logger = Logger(
            name='dump',
            level=self.config['loglevel'],
            handler=handler
        )

        # Exchange name defaults to 'neutron' unless configured otherwise
        exchange_options = {
            'name': self.config.get('queue', 'neutron'),
            'durable': False,
            'type': 'topic'
        }
        queue_options = {
            'name': 'rpcdaemon-dump_%s' % uuid4(),
            'auto_delete': True,
            'durable': False,
            'routing_key': 'q-plugin'
        }
        RPC.__init__(self, connection,
                     exopts=exchange_options,
                     qopts=queue_options)

    def check(self):
        """No periodic work for this plugin."""
        pass

    def update(self, body, message):
        """Log a message body as pretty-printed JSON when the filter matches."""
        # Unwrap the oslo envelope if present
        if 'oslo.message' in body:
            body = json.loads(body['oslo.message'])
        filter_string = self.config._config.get('filter', 'True')
        # NOTE(review): eval() of a config-supplied expression — acceptable
        # only for trusted operator config; never expose this setting to
        # untrusted input.
        if eval(filter_string):
            self.logger.debug(json.dumps(body, indent=2, sort_keys=True))
        message.ack()
class Worker(ConsumerMixin, Thread):
    """Daemon thread that consumes AMQP messages for a set of plugins.

    Each plugin contributes one queue and one callback; kombu's
    ConsumerMixin drives the consume loop (Thread targets mixin.run).
    """

    def __init__(self, connection, plugins=None, handler=None):
        """Bind plugin queues/callbacks and prepare the consumer thread."""
        Thread.__init__(self, target=self.run)  # MRO picks mixin.run
        self.connection = connection
        self.is_connected = True
        # Fix: plugins=[] was a shared mutable default argument; default
        # to None and build a fresh list per instance instead.
        self.plugins = [] if plugins is None else plugins
        self.queues = [plugin.queue for plugin in self.plugins]
        self.callbacks = [plugin.update for plugin in self.plugins]
        self.logger = Logger(name='consumer', handler=handler)

    def on_connection_error(self, exc, interval):
        """Log the first drop, then keep retrying until told to stop."""
        if self.is_connected is True:
            self.logger.warn('Retrying AMQP connection')
            self.is_connected = False
        # force a reconnect, rather than using old connection
        self._default_channel = None
        if self.should_stop:
            self.logger.warn('Disconnected AMQP')
        else:
            self.connection.ensure_connection()

    def on_connection_revived(self):
        """Note recovery exactly once per outage."""
        if self.is_connected is False:
            self.logger.warn('AMQP connection re-established')
            self.is_connected = True

    def get_consumers(self, Consumer, channel):
        """Return one Consumer per (queue, callback) pair."""
        return [
            Consumer(queues=[queue], callbacks=[callback])
            for (queue, callback) in zip(self.queues, self.callbacks)
        ]
def __init__(self, args=None):
    """Parse CLI options, load config, and prepare the daemon context.

    -c <file>  config file path (default /usr/local/etc/rpcdaemon.conf)
    -d         run in the foreground (do NOT daemonize)
    """
    if args is None:
        args = {}

    # Command-line options
    options, _ = getopt.getopt(sys.argv[1:], 'c:d')
    options = dict(options)
    config_file = options.get('-c', '/usr/local/etc/rpcdaemon.conf')
    daemonize = '-d' not in options

    # Daemon configuration
    self.config = Config(config_file, 'Daemon')

    # Log to a file when daemonized, to a stream handler otherwise
    self.logger = Logger(
        name='rpcdaemon',
        level=self.config['loglevel'],
        path=self.config['logfile'] if daemonize else None,
        handler=None if daemonize else logging.StreamHandler()
    )

    # Only a real daemon gets a pidfile
    self.pidfile = PIDFile(self.config['pidfile']) if daemonize else None

    # TODO: plugin.check thread pool?
    self.timeout = int(self.config.get('check_interval', 1))

    # Clamp in case we exit before the worker exists
    self.worker = None

    # Initialize daemon context
    DaemonContext.__init__(
        self,
        detach_process=daemonize,
        files_preserve=[self.logger.handler.stream.fileno()],
        pidfile=self.pidfile,
        stdout=self.logger.handler.stream,
        stderr=self.logger.handler.stream
    )
class Dump(RPC):
    """Diagnostic plugin that dumps bus traffic to the debug log."""

    def __init__(self, connection, config, handler=None):
        """Attach a transient, uniquely-named queue to the configured exchange."""
        self.config = config.section('Dump')

        # Plugin logger
        self.logger = Logger(name='dump',
                             level=self.config['loglevel'],
                             handler=handler)

        # Exchange defaults to 'neutron' when not configured
        exchange = self.config.get('queue', 'neutron')
        RPC.__init__(
            self,
            connection,
            exopts={'name': exchange, 'durable': False, 'type': 'topic'},
            qopts={'name': 'rpcdaemon-dump_%s' % uuid4(),
                   'auto_delete': True,
                   'durable': False,
                   'routing_key': 'q-plugin'}
        )

    def check(self):
        """Nothing to verify periodically."""
        pass

    def update(self, body, message):
        """Pretty-print matching message bodies, then ack."""
        # Oslo wraps the real payload in a JSON envelope
        if 'oslo.message' in body:
            body = json.loads(body['oslo.message'])
        filter_string = self.config._config.get('filter', 'True')
        # NOTE(review): eval() of an operator-supplied filter expression;
        # this must never be fed untrusted input.
        if eval(filter_string):
            self.logger.debug(json.dumps(body, indent=2, sort_keys=True))
        message.ack()
def __init__(self, connection, config, handler=None):
    """Initialize the Dump plugin: config section, logger, and RPC queue."""
    # Our config section
    self.config = config.section('Dump')

    # Plugin logger
    self.logger = Logger(
        name='dump',
        level=self.config['loglevel'],
        handler=handler
    )

    # Topic exchange (default 'neutron') plus a per-process transient queue
    exchange_options = {
        'name': self.config.get('queue', 'neutron'),
        'durable': False,
        'type': 'topic'
    }
    queue_options = {
        'name': 'rpcdaemon-dump_%s' % uuid4(),
        'auto_delete': True,
        'durable': False,
        'routing_key': 'q-plugin'
    }
    RPC.__init__(self, connection, exopts=exchange_options, qopts=queue_options)
def __init__(self, connection, config, handler=None):
    """Set up the L3 agent plugin: config, logging, and RPC wiring."""
    # Our section of the daemon configuration
    self.config = config.section('L3Agent')

    # Seconds of idleness before RabbitMQ expires our queue
    expire_secs = int(self.config.get('queue_expire', 60))

    # Plugin-scoped logger
    self.logger = Logger(name='l3agent',
                         level=self.config['loglevel'],
                         handler=handler)

    # Neutron agent configuration file
    self.qconfig = Config(self.config['conffile'], 'AGENT')

    # Agent-side initialization
    NeutronAgent.__init__(self, self.qconfig, self.config, 'L3 agent')

    # AMQP exchange/queue wiring; x-expires is milliseconds
    exchange_options = {
        'name': self.event_queue(),
        'durable': False,
        'type': 'topic'
    }
    queue_options = {
        'name': 'rpcdaemon-l3_%s' % uuid4(),
        'auto_delete': True,
        'durable': False,
        'routing_key': 'q-plugin',
        'queue_arguments': {'x-expires': expire_secs * 1000}
    }
    RPC.__init__(self, connection, exopts=exchange_options, qopts=queue_options)
class L3Agent(NeutronAgent, RPC):
    """Monitors neutron L3 agents and reschedules routers off dead ones."""

    def __init__(self, connection, config, handler=None):
        """Load config, set up logging, and join the q-plugin topic exchange."""
        # Grab a copy of our config section
        self.config = config.section('L3Agent')

        # Seconds of idleness before RabbitMQ expires our queue
        queue_expire = int(self.config.get('queue_expire', 60))

        # Plugin-scoped logger
        self.logger = Logger(
            name='l3agent',
            level=self.config['loglevel'],
            handler=handler
        )

        # Parse agent config
        self.qconfig = Config(self.config['conffile'], 'AGENT')

        # Initialize super
        NeutronAgent.__init__(self, self.qconfig, self.config, 'L3 agent')

        # Initialize RPC bits; x-expires is milliseconds
        RPC.__init__(
            self,
            connection,
            exopts={
                'name': self.event_queue(),
                'durable': False,
                'type': 'topic'
            },
            qopts={
                'name': 'rpcdaemon-l3_%s' % uuid4(),
                'auto_delete': True,
                'durable': False,
                'routing_key': 'q-plugin',
                'queue_arguments': {'x-expires': queue_expire * 1000}
            }
        )

    # L3 specific handler
    def handle(self, agent, state):
        """Evacuate routers from a dead agent, then round-robin any unbound
        routers across the currently-alive agents."""
        # All alive agents
        targets = dict([(target['id'], target)
                        for target in self.agents.values()
                        if target['alive']])

        # Fix: guard on empty targets (consistent with the DHCP handler) —
        # with no live agents there is nothing to schedule to.
        if not targets:
            self.logger.debug('No agents up; exiting handler early.')
            return

        # If agent is down, remove routers first
        if not state:
            routerlist = self.retryable(
                lambda: self.client.list_routers_on_l3_agent(
                    agent['id']))['routers']
            for router in routerlist:
                self.logger.info(
                    'Removing router %s from %s/%s [%s]' % (
                        router['id'],
                        agent['host'],
                        agent['agent_type'],
                        str(agent['id'])
                    )
                )
                self.retryable(
                    lambda: self.client.remove_router_from_l3_agent(
                        agent['id'], router['id']))

        self.logger.debug('Targets: %s' % targets.keys())

        # get all routers
        routerlist = self.retryable(
            lambda: self.client.list_routers())['routers']

        # Get routers on agents
        binds = dict([(router['id'], router)
                      for target in targets
                      for router in self.retryable(
                          lambda: self.client.list_routers_on_l3_agent(
                              target))['routers']])
        self.logger.debug('Bound Routers: %s' % binds.keys())

        # And routers not on agents
        routers = dict([(router['id'], router)
                        for router in routerlist
                        if not router['id'] in binds])
        self.logger.debug('Free Routers: %s' % routers.keys())

        # Map free routers to agents. Fix: materialize the pairs — zip()
        # is lazy on Python 3, so the debug line below would otherwise log
        # an opaque '<zip object ...>'.
        mapping = list(zip(routers, cycle(targets)))
        self.logger.debug('Mapping: %s' % mapping)

        # Schedule routers to live agents
        for router, target in mapping:
            self.logger.info(
                'Scheduling %s [%s] -> %s/%s [%s].' % (
                    routers[router]['name'],
                    str(router),
                    targets[target]['host'],
                    targets[target]['agent_type'],
                    str(target)
                )
            )
            # this can cause errors if multiple rpcdaemons are running
            msg = 'Router %s already added to agent %s' % (router, target)
            self.retryable(
                lambda: self.client.add_router_to_l3_agent(
                    target, {'router_id': router}),
                retries=1, delay=0,
                on_fail=lambda x: self.logger.warn(msg))
class DHCPAgent(NeutronAgent, RPC):
    """Watches neutron DHCP agents and rebalances networks onto live ones."""

    def __init__(self, connection, config, handler=None):
        """Load config, set up logging, and join the q-plugin topic exchange."""
        # Our section of the daemon config
        self.config = config.section('DHCPAgent')

        # Seconds of idleness before RabbitMQ drops our queue
        expiry = int(self.config.get('queue_expire', 60))

        self.logger = Logger(name='dhcpagent',
                             level=self.config['loglevel'],
                             handler=handler)

        # Neutron agent configuration file
        self.qconfig = Config(self.config['conffile'], 'AGENT')

        NeutronAgent.__init__(self, self.qconfig, self.config, 'DHCP agent')

        # x-expires is in milliseconds
        RPC.__init__(
            self,
            connection,
            exopts={'name': self.event_queue(),
                    'durable': False,
                    'type': 'topic'},
            qopts={'name': 'rpcdaemon-dhcp_%s' % uuid4(),
                   'auto_delete': True,
                   'durable': False,
                   'routing_key': 'q-plugin',
                   'queue_arguments': {'x-expires': expiry * 1000}}
        )

    # DHCP specific handler
    def handle(self, agent, state):
        """Evacuate networks from a dead agent, then schedule each live
        agent's missing networks onto it."""
        # Agents currently reported alive
        targets = {t['id']: t for t in self.agents.values() if t['alive']}
        if not targets:
            self.logger.debug('No agents up; exiting handler early.')
            return

        networklist = self.retryable(
            lambda: self.client.list_networks_on_dhcp_agent(
                agent['id']))['networks']

        # A downed agent is evacuated before rescheduling
        if not state:
            for network in networklist:
                self.logger.info(
                    'Removing network %s from %s/%s [%s]' % (
                        network['id'], agent['host'],
                        agent['agent_type'], str(agent['id'])))
                # Races between multiple rpc agents can make this crash
                msg = 'Network %s already removed from agent %s' % (
                    network['id'], agent['id'])
                self.retryable(
                    lambda: self.client.remove_network_from_dhcp_agent(
                        agent['id'], network['id']),
                    retries=1, delay=0,
                    on_fail=lambda x: self.logger.warn(msg))

        self.logger.debug('Targets: %s' % targets.keys())

        # All networks seen on the triggering agent
        networks = {network['id']: network for network in networklist}
        self.logger.debug('All Networks: %s' % networks.keys())

        # For each live agent, the networks it is missing
        mapping = {
            target: [
                missing for missing in networks
                if missing not in [
                    net['id'] for net in self.retryable(
                        lambda: self.client.list_networks_on_dhcp_agent(
                            target))['networks']
                ]
            ]
            for target in targets
        }
        self.logger.debug('Mapping: %s' % mapping)

        if targets:
            # Schedule the missing networks onto each agent
            for target in mapping:
                for network in mapping[target]:
                    self.logger.info(
                        'Scheduling %s [%s] -> %s/%s [%s].' % (
                            networks[network]['name'], str(network),
                            targets[target]['host'],
                            targets[target]['agent_type'], str(target)))
                    # This can race between multiple rpcdaemon instances
                    msg = 'Network %s already added to agent %s' % (
                        network, target)
                    self.retryable(
                        lambda: self.client.add_network_to_dhcp_agent(
                            target, {'network_id': network}),
                        retries=1, delay=0,
                        on_fail=lambda x: self.logger.warn(msg))
        elif networks:
            self.logger.warn('No agents found to schedule networks to.')
class L3Agent(NeutronAgent, RPC):
    """Monitors neutron L3 agents and reschedules routers off dead ones."""

    def __init__(self, connection, config, handler=None):
        """Load config, set up logging, and join the q-plugin topic exchange."""
        # Grab a copy of our config section
        self.config = config.section('L3Agent')

        # Seconds of idleness before RabbitMQ expires our queue
        queue_expire = int(self.config.get('queue_expire', 60))

        # Plugin-scoped logger
        self.logger = Logger(name='l3agent',
                             level=self.config['loglevel'],
                             handler=handler)

        # Parse agent config
        self.qconfig = Config(self.config['conffile'], 'AGENT')

        # Initialize super
        NeutronAgent.__init__(self, self.qconfig, self.config, 'L3 agent')

        # Initialize RPC bits; x-expires is milliseconds
        RPC.__init__(self, connection,
                     exopts={
                         'name': self.event_queue(),
                         'durable': False,
                         'type': 'topic'
                     },
                     qopts={
                         'name': 'rpcdaemon-l3_%s' % uuid4(),
                         'auto_delete': True,
                         'durable': False,
                         'routing_key': 'q-plugin',
                         'queue_arguments': {'x-expires': queue_expire * 1000}
                     })

    # L3 specific handler
    def handle(self, agent, state):
        """Evacuate routers from a dead agent, then round-robin unbound
        routers across the currently-alive agents."""
        # All alive agents
        targets = dict([(target['id'], target)
                        for target in self.agents.values()
                        if target['alive']])
        if not targets:
            self.logger.debug('No agents up; exiting handler early.')
            return

        # If agent is down, remove routers first
        if not state:
            routerlist = self.retryable(
                lambda: self.client.list_routers_on_l3_agent(
                    agent['id']))['routers']
            for router in routerlist:
                self.logger.info('Removing router %s from %s/%s [%s]' % (
                    router['id'], agent['host'],
                    agent['agent_type'], str(agent['id'])))
                self.retryable(
                    lambda: self.client.remove_router_from_l3_agent(
                        agent['id'], router['id']))

        self.logger.debug('Targets: %s' % targets.keys())

        # get all routers
        routerlist = self.retryable(
            lambda: self.client.list_routers())['routers']

        # Get routers on agents
        binds = dict([(router['id'], router)
                      for target in targets
                      for router in self.retryable(
                          lambda: self.client.list_routers_on_l3_agent(
                              target))['routers']])
        self.logger.debug('Bound Routers: %s' % binds.keys())

        # And routers not on agents
        routers = dict([(router['id'], router)
                        for router in routerlist
                        if not router['id'] in binds])
        self.logger.debug('Free Routers: %s' % routers.keys())

        # Map free routers to agents. Fix: materialize the pairs — zip()
        # is lazy on Python 3, so the debug line below would otherwise log
        # an opaque '<zip object ...>' instead of the actual mapping.
        mapping = list(zip(routers, cycle(targets)))
        self.logger.debug('Mapping: %s' % mapping)

        # Any agents alive?
        if targets:
            # Schedule routers to them
            for router, target in mapping:
                self.logger.info('Scheduling %s [%s] -> %s/%s [%s].' % (
                    routers[router]['name'], str(router),
                    targets[target]['host'],
                    targets[target]['agent_type'], str(target)))
                # this can cause errors if multiple rpcdaemons are running
                msg = 'Router %s already added to agent %s' % (router, target)
                self.retryable(
                    lambda: self.client.add_router_to_l3_agent(
                        target, {'router_id': router}),
                    retries=1, delay=0,
                    on_fail=lambda x: self.logger.warn(msg))
        # No agents, any routers?
        elif routers:
            self.logger.warn('No agents found to schedule routers to.')
class DHCPAgent(NeutronAgent, RPC):
    """Watches neutron DHCP agents and rebalances networks onto live ones."""

    def __init__(self, connection, config, handler=None):
        """Load config, set up logging, and join the q-plugin topic exchange."""
        # Grab a copy of our config section
        self.config = config.section("DHCPAgent")

        # Seconds of idleness before RabbitMQ expires our queue
        queue_expire = int(self.config.get("queue_expire", 60))

        # Plugin-scoped logger
        self.logger = Logger(name="dhcpagent",
                             level=self.config["loglevel"],
                             handler=handler)

        # Parse agent conf
        self.qconfig = Config(self.config["conffile"], "AGENT")

        # Initialize super
        NeutronAgent.__init__(self, self.qconfig, self.config, "DHCP agent")

        # Initialize RPC bits; x-expires is milliseconds
        RPC.__init__(
            self,
            connection,
            exopts={"name": self.event_queue(), "durable": False, "type": "topic"},
            qopts={
                "name": "rpcdaemon-dhcp_%s" % uuid4(),
                "auto_delete": True,
                "durable": False,
                "routing_key": "q-plugin",
                "queue_arguments": {"x-expires": queue_expire * 1000},
            },
        )

    # DHCP specific handler
    def handle(self, agent, state):
        """Evacuate networks from a dead agent, then schedule each live
        agent's missing networks onto it."""
        # All alive agents
        targets = dict([(target["id"], target)
                        for target in self.agents.values()
                        if target["alive"]])

        # Fix: early-return guard on empty targets, consistent with the
        # other agent handlers — nothing to schedule to.
        if not targets:
            self.logger.debug("No agents up; exiting handler early.")
            return

        networklist = self.retryable(
            lambda: self.client.list_networks_on_dhcp_agent(
                agent["id"]))["networks"]

        # If agent is down, remove networks first
        if not state:
            for network in networklist:
                self.logger.info(
                    "Removing network %s from %s/%s [%s]"
                    % (network["id"], agent["host"],
                       agent["agent_type"], str(agent["id"]))
                )
                # Races between multiple rpc agents can make this crash
                msg = "Network %s already removed from agent %s" % (
                    network["id"], agent["id"])
                # Fix: on_fail called self.warn(msg), but no such method
                # exists on this class — log via self.logger.warn as the
                # sibling handlers do.
                self.retryable(
                    lambda: self.client.remove_network_from_dhcp_agent(
                        agent["id"], network["id"]),
                    retries=1,
                    delay=0,
                    on_fail=lambda x: self.logger.warn(msg),
                )

        self.logger.debug("Targets: %s" % targets.keys())

        # Get all networks
        networks = dict([(network["id"], network) for network in networklist])
        self.logger.debug("All Networks: %s" % networks.keys())

        # Map agents to missing networks
        mapping = dict(
            [
                (
                    target,
                    [
                        missing
                        for missing in networks
                        if missing not in [
                            network["id"]
                            for network in self.retryable(
                                lambda: self.client.list_networks_on_dhcp_agent(
                                    target))["networks"]
                        ]
                    ],
                )
                for target in targets
            ]
        )
        self.logger.debug("Mapping: %s" % mapping)

        # Schedule missing networks to live agents
        for target in mapping:
            for network in mapping[target]:
                self.logger.info(
                    "Scheduling %s [%s] -> %s/%s [%s]."
                    % (
                        networks[network]["name"],
                        str(network),
                        targets[target]["host"],
                        targets[target]["agent_type"],
                        str(target),
                    )
                )
                # This can race between multiple rpcdaemon instances
                msg = "Network %s already added to agent %s" % (network, target)
                self.retryable(
                    lambda: self.client.add_network_to_dhcp_agent(
                        target, {"network_id": network}),
                    retries=1,
                    delay=0,
                    on_fail=lambda x: self.logger.warn(msg),
                )
class DHCPAgent(NeutronAgent, RPC):
    """Tracks neutron DHCP agent liveness and redistributes networks."""

    def __init__(self, connection, config, handler=None):
        """Read config, build the logger, and wire up the RPC queue."""
        # Our config section
        self.config = config.section('DHCPAgent')

        # Idle-queue lifetime in seconds
        ttl = int(self.config.get('queue_expire', 60))

        # Plugin logger
        self.logger = Logger(
            name='dhcpagent',
            level=self.config['loglevel'],
            handler=handler
        )

        # Neutron agent configuration file
        self.qconfig = Config(self.config['conffile'], 'AGENT')

        # Agent-side initialization
        NeutronAgent.__init__(self, self.qconfig, self.config, 'DHCP agent')

        # Exchange/queue options; x-expires is milliseconds
        exchange_options = {
            'name': self.event_queue(),
            'durable': False,
            'type': 'topic'
        }
        queue_options = {
            'name': 'rpcdaemon-dhcp_%s' % uuid4(),
            'auto_delete': True,
            'durable': False,
            'routing_key': 'q-plugin',
            'queue_arguments': {'x-expires': ttl * 1000}
        }
        RPC.__init__(self, connection,
                     exopts=exchange_options,
                     qopts=queue_options)

    # DHCP specific handler
    def handle(self, agent, state):
        """Remove networks from a downed agent and fill gaps on live ones."""
        # Live agents, keyed by id
        alive = dict((t['id'], t)
                     for t in self.agents.values() if t['alive'])
        if not alive:
            self.logger.debug('No agents up; exiting handler early.')
            return

        networklist = self.retryable(
            lambda: self.client.list_networks_on_dhcp_agent(
                agent['id']))['networks']

        # Evacuate a downed agent before rescheduling
        if not state:
            for network in networklist:
                self.logger.info(
                    'Removing network %s from %s/%s [%s]' % (
                        network['id'],
                        agent['host'],
                        agent['agent_type'],
                        str(agent['id'])
                    )
                )
                # Races between multiple rpc agents can make this
                # crash
                msg = 'Network %s already removed from agent %s' % (
                    network['id'], agent['id'])
                self.retryable(
                    lambda: self.client.remove_network_from_dhcp_agent(
                        agent['id'], network['id']),
                    retries=1, delay=0,
                    on_fail=lambda x: self.logger.warn(msg))

        self.logger.debug('Targets: %s' % alive.keys())

        # Every network known on the triggering agent
        networks = dict((n['id'], n) for n in networklist)
        self.logger.debug('All Networks: %s' % networks.keys())

        # Per live agent: which of those networks it lacks
        mapping = {}
        for target in alive:
            present = [
                n['id'] for n in self.retryable(
                    lambda: self.client.list_networks_on_dhcp_agent(
                        target))['networks']
            ]
            mapping[target] = [nid for nid in networks
                               if nid not in present]
        self.logger.debug('Mapping: %s' % mapping)

        if alive:
            # Schedule each missing network onto its agent
            for target in mapping:
                for network in mapping[target]:
                    self.logger.info(
                        'Scheduling %s [%s] -> %s/%s [%s].' % (
                            networks[network]['name'],
                            str(network),
                            alive[target]['host'],
                            alive[target]['agent_type'],
                            str(target)
                        )
                    )
                    # This can race between multiple rpcdaemon
                    # instances
                    msg = 'Network %s already added to agent %s' % (
                        network, target)
                    self.retryable(
                        lambda: self.client.add_network_to_dhcp_agent(
                            target, {'network_id': network}),
                        retries=1, delay=0,
                        on_fail=lambda x: self.logger.warn(msg))
        elif networks:
            self.logger.warn('No agents found to schedule networks to.')
class Monitor(DaemonContext):
    """Top-level daemon: owns the AMQP connection, plugins, and worker."""

    def __init__(self, args=None):
        """Parse CLI flags and build the daemon context.

        -c <file>  config file path (default /usr/local/etc/rpcdaemon.conf)
        -d         run in the foreground (do NOT daemonize)
        """
        if args is None:
            args = {}
        options, _ = getopt.getopt(sys.argv[1:], 'c:d')
        options = dict(options)
        config_file = options.get('-c', '/usr/local/etc/rpcdaemon.conf')
        daemonize = '-d' not in options

        # Daemon configuration
        self.config = Config(config_file, 'Daemon')

        # File log when daemonized, stream handler otherwise
        self.logger = Logger(
            name='rpcdaemon',
            level=self.config['loglevel'],
            path=self.config['logfile'] if daemonize else None,
            handler=None if daemonize else logging.StreamHandler()
        )

        # Only a real daemon gets a pidfile
        self.pidfile = PIDFile(self.config['pidfile']) if daemonize else None

        # TODO: plugin.check thread pool?
        self.timeout = int(self.config.get('check_interval', 1))

        # Clamp in case we exit before worker exists
        self.worker = None

        DaemonContext.__init__(
            self,
            detach_process=daemonize,
            files_preserve=[self.logger.handler.stream.fileno()],
            pidfile=self.pidfile,
            stdout=self.logger.handler.stream,
            stderr=self.logger.handler.stream
        )

    def open(self):
        """Enter the daemon context, load plugins, and start the worker."""
        DaemonContext.open(self)

        self.logger.info('Initializing...')

        # RPC connection
        self.connection = Connection(self.config['rpchost'])

        self.logger.info('Loading plugins...')
        # Import each configured plugin module and grab its class
        # (module name is the lowercased class name)
        plugin_classes = [
            getattr(
                __import__('rpcdaemon.plugins.' + name.lower(),
                           fromlist=[name]),
                name
            )
            for name in self.config['plugins'].split()
        ]
        self.plugins = [
            cls(self.connection, self.config, self.logger.handler)
            for cls in plugin_classes
        ]

        # Setup worker with plugins and crank it up
        self.logger.info('Starting worker...')
        self.worker = Worker(self.connection, self.plugins,
                             handler=self.logger.handler)
        self.worker.daemon = True  # Daemon thread
        self.worker.start()
        self.logger.info('Started.')

    def check(self):
        """Dispatch periodic plugin checks while the worker is connected."""
        if self.worker.is_connected:
            self.logger.debug('Dispatching plugin checks...')
            for plugin in self.plugins:
                plugin.check()

    def close(self):
        """Stop the worker (best effort) and tear down the daemon context."""
        # We might get called more than once, or before worker exists
        if self.is_open and self.worker and self.worker.is_alive():
            self.logger.info('Stopping worker...')
            self.worker.should_stop = True
            self.worker.join(5)  # Wait up to 5 seconds
            if self.worker.is_alive():
                self.logger.warn(
                    'Error stopping worker. Shutting down uncleanly.'
                )
            self.logger.info('Stopped.')
        DaemonContext.close(self)